aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ide/ide.txt2
-rw-r--r--Documentation/kernel-parameters.txt7
-rw-r--r--Documentation/lguest/Makefile3
-rw-r--r--Documentation/lguest/lguest.c1008
-rw-r--r--Documentation/lguest/lguest.txt1
-rw-r--r--Documentation/power/devices.txt34
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt36
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt18
-rw-r--r--Documentation/sound/alsa/Procfile.txt36
-rw-r--r--Documentation/sound/alsa/README.maya44163
-rw-r--r--Documentation/sound/alsa/soc/dapm.txt1
-rw-r--r--MAINTAINERS22
-rw-r--r--arch/alpha/include/asm/suspend.h6
-rw-r--r--arch/alpha/mm/extable.c21
-rw-r--r--arch/arm/include/asm/suspend.h4
-rw-r--r--arch/avr32/kernel/module.c2
-rw-r--r--arch/blackfin/Kconfig84
-rw-r--r--arch/blackfin/Kconfig.debug13
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig101
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig219
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig211
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig103
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig105
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig109
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig106
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig253
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig116
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig4
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig29
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig12
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig499
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig12
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig12
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig12
-rw-r--r--arch/blackfin/configs/H8606_defconfig8
-rw-r--r--arch/blackfin/configs/IP0X_defconfig8
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig39
-rw-r--r--arch/blackfin/configs/SRV1_defconfig4
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig27
-rw-r--r--arch/blackfin/include/asm/cacheflush.h40
-rw-r--r--arch/blackfin/include/asm/cplb.h35
-rw-r--r--arch/blackfin/include/asm/dma.h10
-rw-r--r--arch/blackfin/include/asm/elf.h84
-rw-r--r--arch/blackfin/include/asm/entry.h92
-rw-r--r--arch/blackfin/include/asm/gptimers.h43
-rw-r--r--arch/blackfin/include/asm/io.h29
-rw-r--r--arch/blackfin/include/asm/ipipe.h4
-rw-r--r--arch/blackfin/include/asm/pda.h2
-rw-r--r--arch/blackfin/include/asm/processor.h4
-rw-r--r--arch/blackfin/include/asm/time.h1
-rw-r--r--arch/blackfin/include/asm/uaccess.h32
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c86
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c18
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c1
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cacheinit.c9
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cacheinit.c9
-rw-r--r--arch/blackfin/kernel/early_printk.c16
-rw-r--r--arch/blackfin/kernel/gptimers.c30
-rw-r--r--arch/blackfin/kernel/ipipe.c9
-rw-r--r--arch/blackfin/kernel/irqchip.c2
-rw-r--r--arch/blackfin/kernel/kgdb.c60
-rw-r--r--arch/blackfin/kernel/module.c22
-rw-r--r--arch/blackfin/kernel/process.c3
-rw-r--r--arch/blackfin/kernel/setup.c79
-rw-r--r--arch/blackfin/kernel/sys_bfin.c5
-rw-r--r--arch/blackfin/kernel/time-ts.c222
-rw-r--r--arch/blackfin/kernel/time.c53
-rw-r--r--arch/blackfin/kernel/traps.c52
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S5
-rw-r--r--arch/blackfin/mach-bf518/Kconfig1
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c19
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h35
-rw-r--r--arch/blackfin/mach-bf518/include/mach/portmux.h6
-rw-r--r--arch/blackfin/mach-bf527/Kconfig1
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c10
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c10
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c28
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h168
-rw-r--r--arch/blackfin/mach-bf533/Kconfig1
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c6
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h88
-rw-r--r--arch/blackfin/mach-bf537/Kconfig1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537.c4
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c8
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c69
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c4
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h84
-rw-r--r--arch/blackfin/mach-bf538/Kconfig1
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h60
-rw-r--r--arch/blackfin/mach-bf538/include/mach/blackfin.h19
-rw-r--r--arch/blackfin/mach-bf538/include/mach/cdefBF538.h65
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF539.h56
-rw-r--r--arch/blackfin/mach-bf548/Kconfig8
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c85
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h44
-rw-r--r--arch/blackfin/mach-bf548/include/mach/portmux.h64
-rw-r--r--arch/blackfin/mach-bf561/Kconfig16
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c4
-rw-r--r--arch/blackfin/mach-bf561/coreb.c396
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h25
-rw-r--r--arch/blackfin/mach-bf561/include/mach/cdefBF561.h29
-rw-r--r--arch/blackfin/mach-bf561/include/mach/defBF561.h56
-rw-r--r--arch/blackfin/mach-bf561/smp.c4
-rw-r--r--arch/blackfin/mach-common/arch_checks.c7
-rw-r--r--arch/blackfin/mach-common/cache.S46
-rw-r--r--arch/blackfin/mach-common/clocks-init.c1
-rw-r--r--arch/blackfin/mach-common/cpufreq.c3
-rw-r--r--arch/blackfin/mach-common/entry.S76
-rw-r--r--arch/blackfin/mach-common/head.S34
-rw-r--r--arch/blackfin/mach-common/interrupt.S27
-rw-r--r--arch/blackfin/mach-common/ints-priority.c50
-rw-r--r--arch/blackfin/mach-common/smp.c12
-rw-r--r--arch/blackfin/mm/blackfin_sram.h1
-rw-r--r--arch/blackfin/mm/init.c33
-rw-r--r--arch/blackfin/mm/isram-driver.c2
-rw-r--r--arch/blackfin/mm/sram-alloc.c21
-rw-r--r--arch/cris/kernel/module.c2
-rw-r--r--arch/frv/kernel/module.c2
-rw-r--r--arch/h8300/kernel/module.c2
-rw-r--r--arch/ia64/include/asm/suspend.h1
-rw-r--r--arch/ia64/mm/extable.c26
-rw-r--r--arch/m32r/kernel/module.c2
-rw-r--r--arch/m68k/include/asm/suspend.h6
-rw-r--r--arch/m68k/kernel/module.c2
-rw-r--r--arch/m68knommu/kernel/module.c2
-rw-r--r--arch/microblaze/Kconfig121
-rw-r--r--arch/microblaze/Makefile6
-rw-r--r--arch/microblaze/boot/Makefile2
-rw-r--r--arch/microblaze/configs/mmu_defconfig798
-rw-r--r--arch/microblaze/include/asm/Kbuild25
-rw-r--r--arch/microblaze/include/asm/cacheflush.h20
-rw-r--r--arch/microblaze/include/asm/checksum.h14
-rw-r--r--arch/microblaze/include/asm/current.h8
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h130
-rw-r--r--arch/microblaze/include/asm/dma.h5
-rw-r--r--arch/microblaze/include/asm/elf.h93
-rw-r--r--arch/microblaze/include/asm/entry.h37
-rw-r--r--arch/microblaze/include/asm/exceptions.h24
-rw-r--r--arch/microblaze/include/asm/flat.h1
-rw-r--r--arch/microblaze/include/asm/gpio.h6
-rw-r--r--arch/microblaze/include/asm/io.h31
-rw-r--r--arch/microblaze/include/asm/mmu.h104
-rw-r--r--arch/microblaze/include/asm/mmu_context.h26
-rw-r--r--arch/microblaze/include/asm/mmu_context_mm.h140
-rw-r--r--arch/microblaze/include/asm/mmu_context_no.h23
-rw-r--r--arch/microblaze/include/asm/page.h166
-rw-r--r--arch/microblaze/include/asm/pgalloc.h191
-rw-r--r--arch/microblaze/include/asm/pgtable.h538
-rw-r--r--arch/microblaze/include/asm/posix_types.h2
-rw-r--r--arch/microblaze/include/asm/processor.h95
-rw-r--r--arch/microblaze/include/asm/ptrace.h1
-rw-r--r--arch/microblaze/include/asm/registers.h21
-rw-r--r--arch/microblaze/include/asm/sections.h3
-rw-r--r--arch/microblaze/include/asm/segment.h20
-rw-r--r--arch/microblaze/include/asm/setup.h10
-rw-r--r--arch/microblaze/include/asm/stat.h77
-rw-r--r--arch/microblaze/include/asm/string.h2
-rw-r--r--arch/microblaze/include/asm/syscalls.h3
-rw-r--r--arch/microblaze/include/asm/thread_info.h20
-rw-r--r--arch/microblaze/include/asm/tlb.h8
-rw-r--r--arch/microblaze/include/asm/tlbflush.h48
-rw-r--r--arch/microblaze/include/asm/uaccess.h305
-rw-r--r--arch/microblaze/include/asm/unaligned.h3
-rw-r--r--arch/microblaze/kernel/Makefile1
-rw-r--r--arch/microblaze/kernel/asm-offsets.c21
-rw-r--r--arch/microblaze/kernel/early_printk.c3
-rw-r--r--arch/microblaze/kernel/entry-nommu.S2
-rw-r--r--arch/microblaze/kernel/entry.S1116
-rw-r--r--arch/microblaze/kernel/exceptions.c45
-rw-r--r--arch/microblaze/kernel/head.S190
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S746
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c2
-rw-r--r--arch/microblaze/kernel/misc.S120
-rw-r--r--arch/microblaze/kernel/process.c59
-rw-r--r--arch/microblaze/kernel/prom.c7
-rw-r--r--arch/microblaze/kernel/setup.c62
-rw-r--r--arch/microblaze/kernel/signal.c109
-rw-r--r--arch/microblaze/kernel/syscall_table.S6
-rw-r--r--arch/microblaze/kernel/traps.c42
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S5
-rw-r--r--arch/microblaze/lib/Makefile3
-rw-r--r--arch/microblaze/lib/checksum.c31
-rw-r--r--arch/microblaze/lib/memcpy.c5
-rw-r--r--arch/microblaze/lib/uaccess_old.S135
-rw-r--r--arch/microblaze/mm/Makefile2
-rw-r--r--arch/microblaze/mm/fault.c304
-rw-r--r--arch/microblaze/mm/init.c169
-rw-r--r--arch/microblaze/mm/mmu_context.c70
-rw-r--r--arch/microblaze/mm/pgtable.c286
-rw-r--r--arch/mips/include/asm/suspend.h6
-rw-r--r--arch/mips/kernel/module.c2
-rw-r--r--arch/mn10300/kernel/module.c2
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/powerpc/include/asm/mpc52xx_psc.h11
-rw-r--r--arch/powerpc/kernel/module.c2
-rw-r--r--arch/powerpc/kernel/power7-pmu.c12
-rw-r--r--arch/s390/include/asm/suspend.h5
-rw-r--r--arch/s390/kernel/module.c2
-rw-r--r--arch/sh/kernel/module.c2
-rw-r--r--arch/sparc/include/asm/uaccess_32.h3
-rw-r--r--arch/sparc/kernel/module.c2
-rw-r--r--arch/sparc/mm/extable.c29
-rw-r--r--arch/um/include/asm/pgtable.h7
-rw-r--r--arch/um/include/asm/suspend.h4
-rw-r--r--arch/um/sys-i386/Makefile2
-rw-r--r--arch/um/sys-x86_64/Makefile4
-rw-r--r--arch/um/sys-x86_64/um_module.c21
-rw-r--r--arch/x86/include/asm/lguest.h7
-rw-r--r--arch/x86/include/asm/lguest_hcall.h15
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h4
-rw-r--r--arch/x86/include/asm/tlbflush.h2
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/apm_32.c14
-rw-r--r--arch/x86/kernel/asm-offsets_32.c1
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c7
-rw-r--r--arch/x86/kernel/module.c (renamed from arch/x86/kernel/module_64.c)82
-rw-r--r--arch/x86/kernel/module_32.c152
-rw-r--r--arch/x86/kernel/setup.c15
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/lguest/Kconfig1
-rw-r--r--arch/x86/lguest/boot.c158
-rw-r--r--arch/x86/lguest/i386_head.S60
-rw-r--r--arch/x86/mm/init_32.c10
-rw-r--r--arch/x86/power/Makefile2
-rw-r--r--arch/x86/power/cpu.c (renamed from arch/x86/power/cpu_64.c)165
-rw-r--r--arch/x86/power/cpu_32.c148
-rw-r--r--arch/xtensa/kernel/module.c2
-rw-r--r--drivers/base/firmware_class.c129
-rw-r--r--drivers/base/platform.c36
-rw-r--r--drivers/base/power/main.c94
-rw-r--r--drivers/base/sys.c16
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/intel-agp.c16
-rw-r--r--drivers/char/hw_random/virtio-rng.c30
-rw-r--r--drivers/char/virtio_console.c26
-rw-r--r--drivers/char/vt.c1
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_bufs.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c74
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_hashtab.c4
-rw-r--r--drivers/gpu/drm/drm_mm.c165
-rw-r--r--drivers/gpu/drm/drm_modes.c18
-rw-r--r--drivers/gpu/drm/drm_stub.c17
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c67
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h48
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c153
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c152
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c190
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h616
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c20
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c86
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h101
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c76
-rw-r--r--drivers/gpu/drm/i915/intel_display.c645
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c26
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c33
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c151
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c110
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/ide/at91_ide.c7
-rw-r--r--drivers/ide/au1xxx-ide.c8
-rw-r--r--drivers/ide/buddha.c9
-rw-r--r--drivers/ide/cmd640.c7
-rw-r--r--drivers/ide/cs5520.c4
-rw-r--r--drivers/ide/delkin_cb.c6
-rw-r--r--drivers/ide/falconide.c9
-rw-r--r--drivers/ide/gayle.c9
-rw-r--r--drivers/ide/hpt366.c25
-rw-r--r--drivers/ide/icside.c77
-rw-r--r--drivers/ide/ide-4drives.c6
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cs.c6
-rw-r--r--drivers/ide/ide-disk.c75
-rw-r--r--drivers/ide/ide-dma.c1
-rw-r--r--drivers/ide/ide-eh.c14
-rw-r--r--drivers/ide/ide-gd.c14
-rw-r--r--drivers/ide/ide-generic.c7
-rw-r--r--drivers/ide/ide-h8300.c10
-rw-r--r--drivers/ide/ide-io.c77
-rw-r--r--drivers/ide/ide-iops.c26
-rw-r--r--drivers/ide/ide-legacy.c7
-rw-r--r--drivers/ide/ide-pnp.c6
-rw-r--r--drivers/ide/ide-probe.c95
-rw-r--r--drivers/ide/ide-tape.c90
-rw-r--r--drivers/ide/ide-taskfile.c3
-rw-r--r--drivers/ide/ide.c10
-rw-r--r--drivers/ide/ide_platform.c9
-rw-r--r--drivers/ide/macide.c9
-rw-r--r--drivers/ide/palm_bk3710.c6
-rw-r--r--drivers/ide/pdc202xx_new.c26
-rw-r--r--drivers/ide/pdc202xx_old.c92
-rw-r--r--drivers/ide/pmac.c13
-rw-r--r--drivers/ide/q40ide.c11
-rw-r--r--drivers/ide/rapide.c8
-rw-r--r--drivers/ide/scc_pata.c6
-rw-r--r--drivers/ide/setup-pci.c85
-rw-r--r--drivers/ide/sgiioc4.c7
-rw-r--r--drivers/ide/siimage.c4
-rw-r--r--drivers/ide/sl82c105.c9
-rw-r--r--drivers/ide/tx4938ide.c5
-rw-r--r--drivers/ide/tx4939ide.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/core.c30
-rw-r--r--drivers/lguest/hypercalls.c14
-rw-r--r--drivers/lguest/interrupts_and_traps.c57
-rw-r--r--drivers/lguest/lg.h28
-rw-r--r--drivers/lguest/lguest_device.c41
-rw-r--r--drivers/lguest/lguest_user.c127
-rw-r--r--drivers/lguest/page_tables.c396
-rw-r--r--drivers/lguest/segments.c2
-rw-r--r--drivers/message/fusion/mptbase.c1571
-rw-r--r--drivers/message/fusion/mptbase.h180
-rw-r--r--drivers/message/fusion/mptctl.c692
-rw-r--r--drivers/message/fusion/mptdebug.h3
-rw-r--r--drivers/message/fusion/mptfc.c15
-rw-r--r--drivers/message/fusion/mptsas.c3114
-rw-r--r--drivers/message/fusion/mptsas.h41
-rw-r--r--drivers/message/fusion/mptscsih.c1329
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c71
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bnx2.c193
-rw-r--r--drivers/net/bnx2.h18
-rw-r--r--drivers/net/cnic.c2711
-rw-r--r--drivers/net/cnic.h299
-rw-r--r--drivers/net/cnic_defs.h580
-rw-r--r--drivers/net/cnic_if.h299
-rw-r--r--drivers/net/virtio_net.c45
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/pnp/resource.c18
-rw-r--r--drivers/s390/kvm/kvm_virtio.c43
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c30
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h7
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c29
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c13
-rw-r--r--drivers/scsi/Kconfig31
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR_D700.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h155
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h1509
-rw-r--r--drivers/scsi/bnx2i/Kconfig7
-rw-r--r--drivers/scsi/bnx2i/Makefile3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h771
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2405
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c438
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2064
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c142
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h1
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c26
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c23
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c95
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/fcoe/libfcoe.c21
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/gdth_proc.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c434
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c463
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h4
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h68
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/libfc/fc_exch.c4
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/libiscsi.c468
-rw-r--r--drivers/scsi/libiscsi_tcp.c18
-rw-r--r--drivers/scsi/lpfc/lpfc.h123
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c250
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h63
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c275
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c1365
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h142
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2141
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5626
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c674
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c206
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c930
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6683
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h467
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c62
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c32
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c363
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c36
-rw-r--r--drivers/scsi/mvsas.c3222
-rw-r--r--drivers/scsi/mvsas/Kconfig42
-rw-r--r--drivers/scsi/mvsas/Makefile32
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c793
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h151
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c672
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h222
-rw-r--r--drivers/scsi/mvsas/mv_chips.h280
-rw-r--r--drivers/scsi/mvsas/mv_defs.h502
-rw-r--r--drivers/scsi/mvsas/mv_init.c703
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2154
-rw-r--r--drivers/scsi/mvsas/mv_sas.h406
-rw-r--r--drivers/scsi/osd/Kbuild25
-rwxr-xr-xdrivers/scsi/osd/Makefile37
-rw-r--r--drivers/scsi/osd/osd_initiator.c83
-rw-r--r--drivers/scsi/osd/osd_uld.c66
-rw-r--r--drivers/scsi/qla1280.c387
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c227
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c206
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c240
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c244
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_scan.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c173
-rw-r--r--drivers/scsi/sd.c45
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c66
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c49
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/aty/aty128fb.c2
-rw-r--r--drivers/video/cyber2000fb.c9
-rw-r--r--drivers/video/uvesafb.c10
-rw-r--r--drivers/virtio/virtio.c29
-rw-r--r--drivers/virtio/virtio_balloon.c27
-rw-r--r--drivers/virtio/virtio_pci.c307
-rw-r--r--drivers/virtio/virtio_ring.c102
-rw-r--r--drivers/xen/manage.c16
-rw-r--r--fs/Kconfig10
-rw-r--r--fs/dlm/dir.c7
-rw-r--r--fs/dlm/lockspace.c17
-rw-r--r--fs/dlm/lowcomms.c22
-rw-r--r--fs/dlm/lowcomms.h3
-rw-r--r--fs/dlm/member.c19
-rw-r--r--fs/dlm/requestqueue.c2
-rw-r--r--fs/eventfd.c3
-rw-r--r--fs/exofs/common.h6
-rw-r--r--fs/exofs/inode.c8
-rw-r--r--fs/exofs/osd.c26
-rw-r--r--fs/fuse/Makefile1
-rw-r--r--fs/fuse/cuse.c610
-rw-r--r--fs/fuse/dev.c15
-rw-r--r--fs/fuse/dir.c33
-rw-r--r--fs/fuse/file.c346
-rw-r--r--fs/fuse/fuse_i.h47
-rw-r--r--fs/fuse/inode.c118
-rw-r--r--fs/gfs2/Makefile1
-rw-r--r--fs/gfs2/bmap.c3
-rw-r--r--fs/gfs2/glock.c12
-rw-r--r--fs/gfs2/log.c9
-rw-r--r--fs/gfs2/lops.c3
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/rgrp.c11
-rw-r--r--fs/gfs2/super.c4
-rw-r--r--fs/gfs2/trace_gfs2.h407
-rw-r--r--fs/partitions/check.c42
-rw-r--r--include/drm/drmP.h126
-rw-r--r--include/drm/drm_hashtab.h2
-rw-r--r--include/drm/drm_mm.h90
-rw-r--r--include/drm/drm_pciids.h9
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/compiler.h5
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/dlm.h4
-rw-r--r--include/linux/fuse.h31
-rw-r--r--include/linux/genhd.h1
-rw-r--r--include/linux/gfp.h3
-rw-r--r--include/linux/ide.h46
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/lguest.h4
-rw-r--r--include/linux/lguest_launcher.h3
-rw-r--r--include/linux/module.h1
-rw-r--r--include/linux/moduleparam.h40
-rw-r--r--include/linux/page_cgroup.h18
-rw-r--r--include/linux/pci_ids.h12
-rw-r--r--include/linux/perf_counter.h22
-rw-r--r--include/linux/pm.h11
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/slob_def.h5
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/suspend.h18
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/virtio.h15
-rw-r--r--include/linux/virtio_config.h49
-rw-r--r--include/linux/virtio_pci.h10
-rw-r--r--include/linux/virtio_ring.h8
-rw-r--r--include/scsi/fc/fc_fip.h7
-rw-r--r--include/scsi/iscsi_if.h49
-rw-r--r--include/scsi/libfc.h1
-rw-r--r--include/scsi/libiscsi.h8
-rw-r--r--include/scsi/osd_attributes.h74
-rw-r--r--include/scsi/osd_initiator.h14
-rw-r--r--include/scsi/osd_protocol.h8
-rw-r--r--include/scsi/scsi_transport_iscsi.h8
-rw-r--r--include/sound/asound.h1
-rw-r--r--include/sound/core.h11
-rw-r--r--include/sound/driver.h1
-rw-r--r--include/sound/pcm.h76
-rw-r--r--include/sound/soc-dai.h30
-rw-r--r--include/sound/soc-dapm.h24
-rw-r--r--include/sound/soc.h34
-rw-r--r--include/sound/wm9081.h25
-rw-r--r--init/Kconfig2
-rw-r--r--init/main.c6
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/kexec.c14
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/params.c46
-rw-r--r--kernel/perf_counter.c95
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/Makefile5
-rw-r--r--kernel/power/hibernate.c (renamed from kernel/power/disk.c)34
-rw-r--r--kernel/power/hibernate_nvs.c135
-rw-r--r--kernel/power/main.c521
-rw-r--r--kernel/power/power.h25
-rw-r--r--kernel/power/snapshot.c80
-rw-r--r--kernel/power/suspend.c300
-rw-r--r--kernel/power/suspend_test.c187
-rw-r--r--kernel/power/swsusp.c198
-rw-r--r--kernel/sched.c1
-rw-r--r--lib/extable.c21
-rw-r--r--mm/page_cgroup.c29
-rw-r--r--mm/slab.c41
-rw-r--r--mm/slub.c16
-rw-r--r--mm/vmscan.c4
-rw-r--r--net/9p/trans_virtio.c6
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--sound/aoa/fabrics/layout.c8
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c8
-rw-r--r--sound/core/Kconfig2
-rw-r--r--sound/core/init.c61
-rw-r--r--sound/core/jack.c2
-rw-r--r--sound/core/oss/pcm_oss.c5
-rw-r--r--sound/core/pcm_lib.c92
-rw-r--r--sound/core/pcm_native.c23
-rw-r--r--sound/core/seq/Kconfig16
-rw-r--r--sound/core/seq/Makefile18
-rw-r--r--sound/drivers/opl3/Makefile10
-rw-r--r--sound/drivers/opl4/Makefile10
-rw-r--r--sound/isa/Kconfig7
-rw-r--r--sound/isa/es1688/es1688.c2
-rw-r--r--sound/isa/gus/gusextreme.c2
-rw-r--r--sound/isa/sb/Makefile10
-rw-r--r--sound/isa/sc6000.c134
-rw-r--r--sound/mips/sgio2audio.c3
-rw-r--r--sound/parisc/harmony.c4
-rw-r--r--sound/pci/Kconfig27
-rw-r--r--sound/pci/Makefile2
-rw-r--r--sound/pci/au88x0/au88x0_core.c10
-rw-r--r--sound/pci/bt87x.c2
-rw-r--r--sound/pci/ca0106/ca0106_main.c1
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c10
-rw-r--r--sound/pci/ctxfi/Makefile5
-rw-r--r--sound/pci/ctxfi/ct20k1reg.h636
-rw-r--r--sound/pci/ctxfi/ct20k2reg.h85
-rw-r--r--sound/pci/ctxfi/ctamixer.c488
-rw-r--r--sound/pci/ctxfi/ctamixer.h96
-rw-r--r--sound/pci/ctxfi/ctatc.c1619
-rw-r--r--sound/pci/ctxfi/ctatc.h147
-rw-r--r--sound/pci/ctxfi/ctdaio.c769
-rw-r--r--sound/pci/ctxfi/ctdaio.h122
-rw-r--r--sound/pci/ctxfi/cthardware.c91
-rw-r--r--sound/pci/ctxfi/cthardware.h196
-rw-r--r--sound/pci/ctxfi/cthw20k1.c2248
-rw-r--r--sound/pci/ctxfi/cthw20k1.h26
-rw-r--r--sound/pci/ctxfi/cthw20k2.c2137
-rw-r--r--sound/pci/ctxfi/cthw20k2.h26
-rw-r--r--sound/pci/ctxfi/ctimap.c112
-rw-r--r--sound/pci/ctxfi/ctimap.h40
-rw-r--r--sound/pci/ctxfi/ctmixer.c1123
-rw-r--r--sound/pci/ctxfi/ctmixer.h67
-rw-r--r--sound/pci/ctxfi/ctpcm.c426
-rw-r--r--sound/pci/ctxfi/ctpcm.h27
-rw-r--r--sound/pci/ctxfi/ctresource.c301
-rw-r--r--sound/pci/ctxfi/ctresource.h72
-rw-r--r--sound/pci/ctxfi/ctsrc.c886
-rw-r--r--sound/pci/ctxfi/ctsrc.h149
-rw-r--r--sound/pci/ctxfi/cttimer.c441
-rw-r--r--sound/pci/ctxfi/cttimer.h29
-rw-r--r--sound/pci/ctxfi/ctvmem.c250
-rw-r--r--sound/pci/ctxfi/ctvmem.h61
-rw-r--r--sound/pci/ctxfi/xfi.c142
-rw-r--r--sound/pci/emu10k1/Makefile10
-rw-r--r--sound/pci/emu10k1/emu10k1x.c1
-rw-r--r--sound/pci/emu10k1/emupcm.c2
-rw-r--r--sound/pci/hda/Kconfig13
-rw-r--r--sound/pci/hda/Makefile4
-rw-r--r--sound/pci/hda/hda_beep.c55
-rw-r--r--sound/pci/hda/hda_beep.h5
-rw-r--r--sound/pci/hda/hda_codec.c239
-rw-r--r--sound/pci/hda/hda_codec.h13
-rw-r--r--sound/pci/hda/hda_hwdep.c9
-rw-r--r--sound/pci/hda/hda_intel.c197
-rw-r--r--sound/pci/hda/hda_proc.c8
-rw-r--r--sound/pci/hda/patch_ca0110.c573
-rw-r--r--sound/pci/hda/patch_nvhdmi.c279
-rw-r--r--sound/pci/hda/patch_realtek.c2328
-rw-r--r--sound/pci/hda/patch_sigmatel.c278
-rw-r--r--sound/pci/hda/patch_via.c111
-rw-r--r--sound/pci/ice1712/Makefile2
-rw-r--r--sound/pci/ice1712/ice1712.h12
-rw-r--r--sound/pci/ice1712/ice1724.c96
-rw-r--r--sound/pci/ice1712/maya44.c779
-rw-r--r--sound/pci/ice1712/maya44.h10
-rw-r--r--sound/pci/lx6464es/Makefile2
-rw-r--r--sound/pci/lx6464es/lx6464es.c1159
-rw-r--r--sound/pci/lx6464es/lx6464es.h114
-rw-r--r--sound/pci/lx6464es/lx_core.c1444
-rw-r--r--sound/pci/lx6464es/lx_core.h242
-rw-r--r--sound/pci/lx6464es/lx_defs.h376
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c6
-rw-r--r--sound/pci/oxygen/virtuoso.c64
-rw-r--r--sound/pci/riptide/riptide.c347
-rw-r--r--sound/pci/rme9652/hdsp.c11
-rw-r--r--sound/pci/rme9652/hdspm.c4
-rw-r--r--sound/ppc/awacs.c54
-rw-r--r--sound/ppc/beep.c2
-rw-r--r--sound/ppc/burgundy.c26
-rw-r--r--sound/ppc/daca.c2
-rw-r--r--sound/ppc/keywest.c10
-rw-r--r--sound/ppc/pmac.c12
-rw-r--r--sound/ppc/snd_ps3.c655
-rw-r--r--sound/ppc/tumbler.c16
-rw-r--r--sound/soc/Kconfig2
-rw-r--r--sound/soc/Makefile2
-rw-r--r--sound/soc/atmel/Kconfig8
-rw-r--r--sound/soc/atmel/Makefile1
-rw-r--r--sound/soc/atmel/playpaq_wm8510.c2
-rw-r--r--sound/soc/atmel/snd-soc-afeb9260.c203
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c9
-rw-r--r--sound/soc/blackfin/bf5xx-sport.c4
-rw-r--r--sound/soc/codecs/Kconfig24
-rw-r--r--sound/soc/codecs/Makefile12
-rw-r--r--sound/soc/codecs/ac97.c4
-rw-r--r--sound/soc/codecs/ad1980.c4
-rw-r--r--sound/soc/codecs/cs4270.c105
-rw-r--r--sound/soc/codecs/spdif_transciever.c71
-rw-r--r--sound/soc/codecs/spdif_transciever.h17
-rw-r--r--sound/soc/codecs/ssm2602.c33
-rw-r--r--sound/soc/codecs/stac9766.c463
-rw-r--r--sound/soc/codecs/stac9766.h21
-rw-r--r--sound/soc/codecs/tlv320aic23.c16
-rw-r--r--sound/soc/codecs/twl4030.c1116
-rw-r--r--sound/soc/codecs/twl4030.h43
-rw-r--r--sound/soc/codecs/uda134x.c4
-rw-r--r--sound/soc/codecs/wm8350.c2
-rw-r--r--sound/soc/codecs/wm8350.h1
-rw-r--r--sound/soc/codecs/wm8400.c8
-rw-r--r--sound/soc/codecs/wm8510.c2
-rw-r--r--sound/soc/codecs/wm8580.c4
-rw-r--r--sound/soc/codecs/wm8731.c4
-rw-r--r--sound/soc/codecs/wm8753.c6
-rw-r--r--sound/soc/codecs/wm8900.c6
-rw-r--r--sound/soc/codecs/wm8903.c119
-rw-r--r--sound/soc/codecs/wm8940.c955
-rw-r--r--sound/soc/codecs/wm8940.h104
-rw-r--r--sound/soc/codecs/wm8960.c969
-rw-r--r--sound/soc/codecs/wm8960.h127
-rw-r--r--sound/soc/codecs/wm8988.c1097
-rw-r--r--sound/soc/codecs/wm8988.h60
-rw-r--r--sound/soc/codecs/wm8990.c2
-rw-r--r--sound/soc/codecs/wm9081.c1534
-rw-r--r--sound/soc/codecs/wm9081.h787
-rw-r--r--sound/soc/codecs/wm9705.c4
-rw-r--r--sound/soc/codecs/wm9712.c8
-rw-r--r--sound/soc/codecs/wm9713.c48
-rw-r--r--sound/soc/fsl/Kconfig32
-rw-r--r--sound/soc/fsl/Makefile7
-rw-r--r--sound/soc/fsl/efika-audio-fabric.c90
-rw-r--r--sound/soc/fsl/fsl_ssi.c11
-rw-r--r--sound/soc/fsl/mpc5200_dma.c564
-rw-r--r--sound/soc/fsl/mpc5200_dma.h80
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c329
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.h15
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c754
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.h12
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c90
-rw-r--r--sound/soc/omap/Kconfig8
-rw-r--r--sound/soc/omap/Makefile2
-rw-r--r--sound/soc/omap/n810.c7
-rw-r--r--sound/soc/omap/omap-mcbsp.c43
-rw-r--r--sound/soc/omap/omap-pcm.c9
-rw-r--r--sound/soc/omap/omap2evm.c2
-rw-r--r--sound/soc/omap/omap3beagle.c28
-rw-r--r--sound/soc/omap/omap3evm.c147
-rw-r--r--sound/soc/omap/omap3pandora.c4
-rw-r--r--sound/soc/omap/overo.c2
-rw-r--r--sound/soc/omap/sdp3430.c94
-rw-r--r--sound/soc/pxa/Kconfig13
-rw-r--r--sound/soc/pxa/Makefile2
-rw-r--r--sound/soc/pxa/em-x270.c9
-rw-r--r--sound/soc/pxa/imote2.c114
-rw-r--r--sound/soc/pxa/magician.c13
-rw-r--r--sound/soc/pxa/pxa-ssp.c218
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c39
-rw-r--r--sound/soc/s3c24xx/neo1973_wm8753.c16
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c91
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.c2
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.c157
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.h6
-rw-r--r--sound/soc/s6000/Kconfig19
-rw-r--r--sound/soc/s6000/Makefile11
-rw-r--r--sound/soc/s6000/s6000-i2s.c629
-rw-r--r--sound/soc/s6000/s6000-i2s.h25
-rw-r--r--sound/soc/s6000/s6000-pcm.c497
-rw-r--r--sound/soc/s6000/s6000-pcm.h35
-rw-r--r--sound/soc/s6000/s6105-ipcam.c244
-rw-r--r--sound/soc/sh/ssi.c2
-rw-r--r--sound/soc/soc-core.c165
-rw-r--r--sound/soc/soc-dapm.c427
-rw-r--r--sound/soc/txx9/Kconfig29
-rw-r--r--sound/soc/txx9/Makefile11
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c255
-rw-r--r--sound/soc/txx9/txx9aclc-generic.c98
-rw-r--r--sound/soc/txx9/txx9aclc.c430
-rw-r--r--sound/soc/txx9/txx9aclc.h83
-rw-r--r--sound/synth/Makefile12
-rw-r--r--sound/synth/emux/Makefile12
-rw-r--r--sound/usb/caiaq/audio.c88
-rw-r--r--sound/usb/caiaq/device.c109
-rw-r--r--sound/usb/caiaq/device.h1
-rw-r--r--sound/usb/caiaq/midi.c24
-rw-r--r--sound/usb/usbaudio.c39
-rw-r--r--sound/usb/usbquirks.h43
-rw-r--r--tools/perf/builtin-record.c7
-rw-r--r--tools/perf/design.txt15
-rw-r--r--tools/perf/perf.h5
-rw-r--r--tools/perf/util/parse-events.c2
770 files changed, 93573 insertions, 19301 deletions
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 0c78f4b1d9d9..e77bebfa7b0d 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -216,6 +216,8 @@ Other kernel parameters for ide_core are:
216 216
217* "noflush=[interface_number.device_number]" to disable flush requests 217* "noflush=[interface_number.device_number]" to disable flush requests
218 218
219* "nohpa=[interface_number.device_number]" to disable Host Protected Area
220
219* "noprobe=[interface_number.device_number]" to skip probing 221* "noprobe=[interface_number.device_number]" to skip probing
220 222
221* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit 223* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7bcdebffdab3..0bf8a882ee9e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -887,11 +887,8 @@ and is between 256 and 4096 characters. It is defined in the file
887 887
888 ide-core.nodma= [HW] (E)IDE subsystem 888 ide-core.nodma= [HW] (E)IDE subsystem
889 Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc 889 Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc
890 .vlb_clock .pci_clock .noflush .noprobe .nowerr .cdrom 890 .vlb_clock .pci_clock .noflush .nohpa .noprobe .nowerr
891 .chs .ignore_cable are additional options 891 .cdrom .chs .ignore_cable are additional options
892 See Documentation/ide/ide.txt.
893
894 idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed
895 See Documentation/ide/ide.txt. 892 See Documentation/ide/ide.txt.
896 893
897 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem 894 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
index 1f4f9e888bd1..28c8cdfcafd8 100644
--- a/Documentation/lguest/Makefile
+++ b/Documentation/lguest/Makefile
@@ -1,6 +1,5 @@
1# This creates the demonstration utility "lguest" which runs a Linux guest. 1# This creates the demonstration utility "lguest" which runs a Linux guest.
2CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE 2CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
3LDLIBS:=-lz
4 3
5all: lguest 4all: lguest
6 5
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index d36fcc0f2715..9ebcd6ef361b 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -16,6 +16,7 @@
16#include <sys/types.h> 16#include <sys/types.h>
17#include <sys/stat.h> 17#include <sys/stat.h>
18#include <sys/wait.h> 18#include <sys/wait.h>
19#include <sys/eventfd.h>
19#include <fcntl.h> 20#include <fcntl.h>
20#include <stdbool.h> 21#include <stdbool.h>
21#include <errno.h> 22#include <errno.h>
@@ -59,7 +60,6 @@ typedef uint8_t u8;
59/*:*/ 60/*:*/
60 61
61#define PAGE_PRESENT 0x7 /* Present, RW, Execute */ 62#define PAGE_PRESENT 0x7 /* Present, RW, Execute */
62#define NET_PEERNUM 1
63#define BRIDGE_PFX "bridge:" 63#define BRIDGE_PFX "bridge:"
64#ifndef SIOCBRADDIF 64#ifndef SIOCBRADDIF
65#define SIOCBRADDIF 0x89a2 /* add interface to bridge */ 65#define SIOCBRADDIF 0x89a2 /* add interface to bridge */
@@ -76,19 +76,12 @@ static bool verbose;
76 do { if (verbose) printf(args); } while(0) 76 do { if (verbose) printf(args); } while(0)
77/*:*/ 77/*:*/
78 78
79/* File descriptors for the Waker. */
80struct {
81 int pipe[2];
82 int lguest_fd;
83} waker_fds;
84
85/* The pointer to the start of guest memory. */ 79/* The pointer to the start of guest memory. */
86static void *guest_base; 80static void *guest_base;
87/* The maximum guest physical address allowed, and maximum possible. */ 81/* The maximum guest physical address allowed, and maximum possible. */
88static unsigned long guest_limit, guest_max; 82static unsigned long guest_limit, guest_max;
89/* The pipe for signal hander to write to. */ 83/* The /dev/lguest file descriptor. */
90static int timeoutpipe[2]; 84static int lguest_fd;
91static unsigned int timeout_usec = 500;
92 85
93/* a per-cpu variable indicating whose vcpu is currently running */ 86/* a per-cpu variable indicating whose vcpu is currently running */
94static unsigned int __thread cpu_id; 87static unsigned int __thread cpu_id;
@@ -96,11 +89,6 @@ static unsigned int __thread cpu_id;
96/* This is our list of devices. */ 89/* This is our list of devices. */
97struct device_list 90struct device_list
98{ 91{
99 /* Summary information about the devices in our list: ready to pass to
100 * select() to ask which need servicing.*/
101 fd_set infds;
102 int max_infd;
103
104 /* Counter to assign interrupt numbers. */ 92 /* Counter to assign interrupt numbers. */
105 unsigned int next_irq; 93 unsigned int next_irq;
106 94
@@ -126,22 +114,21 @@ struct device
126 /* The linked-list pointer. */ 114 /* The linked-list pointer. */
127 struct device *next; 115 struct device *next;
128 116
129 /* The this device's descriptor, as mapped into the Guest. */ 117 /* The device's descriptor, as mapped into the Guest. */
130 struct lguest_device_desc *desc; 118 struct lguest_device_desc *desc;
131 119
120 /* We can't trust desc values once Guest has booted: we use these. */
121 unsigned int feature_len;
122 unsigned int num_vq;
123
132 /* The name of this device, for --verbose. */ 124 /* The name of this device, for --verbose. */
133 const char *name; 125 const char *name;
134 126
135 /* If handle_input is set, it wants to be called when this file
136 * descriptor is ready. */
137 int fd;
138 bool (*handle_input)(int fd, struct device *me);
139
140 /* Any queues attached to this device */ 127 /* Any queues attached to this device */
141 struct virtqueue *vq; 128 struct virtqueue *vq;
142 129
143 /* Handle status being finalized (ie. feature bits stable). */ 130 /* Is it operational */
144 void (*ready)(struct device *me); 131 bool running;
145 132
146 /* Device-specific data. */ 133 /* Device-specific data. */
147 void *priv; 134 void *priv;
@@ -164,22 +151,28 @@ struct virtqueue
164 /* Last available index we saw. */ 151 /* Last available index we saw. */
165 u16 last_avail_idx; 152 u16 last_avail_idx;
166 153
167 /* The routine to call when the Guest pings us, or timeout. */ 154 /* How many are used since we sent last irq? */
168 void (*handle_output)(int fd, struct virtqueue *me, bool timeout); 155 unsigned int pending_used;
169 156
170 /* Outstanding buffers */ 157 /* Eventfd where Guest notifications arrive. */
171 unsigned int inflight; 158 int eventfd;
172 159
173 /* Is this blocked awaiting a timer? */ 160 /* Function for the thread which is servicing this virtqueue. */
174 bool blocked; 161 void (*service)(struct virtqueue *vq);
162 pid_t thread;
175}; 163};
176 164
177/* Remember the arguments to the program so we can "reboot" */ 165/* Remember the arguments to the program so we can "reboot" */
178static char **main_args; 166static char **main_args;
179 167
180/* Since guest is UP and we don't run at the same time, we don't need barriers. 168/* The original tty settings to restore on exit. */
181 * But I include them in the code in case others copy it. */ 169static struct termios orig_term;
182#define wmb() 170
171/* We have to be careful with barriers: our devices are all run in separate
172 * threads and so we need to make sure that changes visible to the Guest happen
173 * in precise order. */
174#define wmb() __asm__ __volatile__("" : : : "memory")
175#define mb() __asm__ __volatile__("" : : : "memory")
183 176
184/* Convert an iovec element to the given type. 177/* Convert an iovec element to the given type.
185 * 178 *
@@ -245,7 +238,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
245static u8 *get_feature_bits(struct device *dev) 238static u8 *get_feature_bits(struct device *dev)
246{ 239{
247 return (u8 *)(dev->desc + 1) 240 return (u8 *)(dev->desc + 1)
248 + dev->desc->num_vq * sizeof(struct lguest_vqconfig); 241 + dev->num_vq * sizeof(struct lguest_vqconfig);
249} 242}
250 243
251/*L:100 The Launcher code itself takes us out into userspace, that scary place 244/*L:100 The Launcher code itself takes us out into userspace, that scary place
@@ -505,99 +498,19 @@ static void concat(char *dst, char *args[])
505 * saw the arguments it expects when we looked at initialize() in lguest_user.c: 498 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
506 * the base of Guest "physical" memory, the top physical page to allow and the 499 * the base of Guest "physical" memory, the top physical page to allow and the
507 * entry point for the Guest. */ 500 * entry point for the Guest. */
508static int tell_kernel(unsigned long start) 501static void tell_kernel(unsigned long start)
509{ 502{
510 unsigned long args[] = { LHREQ_INITIALIZE, 503 unsigned long args[] = { LHREQ_INITIALIZE,
511 (unsigned long)guest_base, 504 (unsigned long)guest_base,
512 guest_limit / getpagesize(), start }; 505 guest_limit / getpagesize(), start };
513 int fd;
514
515 verbose("Guest: %p - %p (%#lx)\n", 506 verbose("Guest: %p - %p (%#lx)\n",
516 guest_base, guest_base + guest_limit, guest_limit); 507 guest_base, guest_base + guest_limit, guest_limit);
517 fd = open_or_die("/dev/lguest", O_RDWR); 508 lguest_fd = open_or_die("/dev/lguest", O_RDWR);
518 if (write(fd, args, sizeof(args)) < 0) 509 if (write(lguest_fd, args, sizeof(args)) < 0)
519 err(1, "Writing to /dev/lguest"); 510 err(1, "Writing to /dev/lguest");
520
521 /* We return the /dev/lguest file descriptor to control this Guest */
522 return fd;
523} 511}
524/*:*/ 512/*:*/
525 513
526static void add_device_fd(int fd)
527{
528 FD_SET(fd, &devices.infds);
529 if (fd > devices.max_infd)
530 devices.max_infd = fd;
531}
532
533/*L:200
534 * The Waker.
535 *
536 * With console, block and network devices, we can have lots of input which we
537 * need to process. We could try to tell the kernel what file descriptors to
538 * watch, but handing a file descriptor mask through to the kernel is fairly
539 * icky.
540 *
541 * Instead, we clone off a thread which watches the file descriptors and writes
542 * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host
543 * stop running the Guest. This causes the Launcher to return from the
544 * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset
545 * the LHREQ_BREAK and wake us up again.
546 *
547 * This, of course, is merely a different *kind* of icky.
548 *
549 * Given my well-known antipathy to threads, I'd prefer to use processes. But
550 * it's easier to share Guest memory with threads, and trivial to share the
551 * devices.infds as the Launcher changes it.
552 */
553static int waker(void *unused)
554{
555 /* Close the write end of the pipe: only the Launcher has it open. */
556 close(waker_fds.pipe[1]);
557
558 for (;;) {
559 fd_set rfds = devices.infds;
560 unsigned long args[] = { LHREQ_BREAK, 1 };
561 unsigned int maxfd = devices.max_infd;
562
563 /* We also listen to the pipe from the Launcher. */
564 FD_SET(waker_fds.pipe[0], &rfds);
565 if (waker_fds.pipe[0] > maxfd)
566 maxfd = waker_fds.pipe[0];
567
568 /* Wait until input is ready from one of the devices. */
569 select(maxfd+1, &rfds, NULL, NULL, NULL);
570
571 /* Message from Launcher? */
572 if (FD_ISSET(waker_fds.pipe[0], &rfds)) {
573 char c;
574 /* If this fails, then assume Launcher has exited.
575 * Don't do anything on exit: we're just a thread! */
576 if (read(waker_fds.pipe[0], &c, 1) != 1)
577 _exit(0);
578 continue;
579 }
580
581 /* Send LHREQ_BREAK command to snap the Launcher out of it. */
582 pwrite(waker_fds.lguest_fd, args, sizeof(args), cpu_id);
583 }
584 return 0;
585}
586
587/* This routine just sets up a pipe to the Waker process. */
588static void setup_waker(int lguest_fd)
589{
590 /* This pipe is closed when Launcher dies, telling Waker. */
591 if (pipe(waker_fds.pipe) != 0)
592 err(1, "Creating pipe for Waker");
593
594 /* Waker also needs to know the lguest fd */
595 waker_fds.lguest_fd = lguest_fd;
596
597 if (clone(waker, malloc(4096) + 4096, CLONE_VM | SIGCHLD, NULL) == -1)
598 err(1, "Creating Waker");
599}
600
601/* 514/*
602 * Device Handling. 515 * Device Handling.
603 * 516 *
@@ -623,49 +536,90 @@ static void *_check_pointer(unsigned long addr, unsigned int size,
623/* Each buffer in the virtqueues is actually a chain of descriptors. This 536/* Each buffer in the virtqueues is actually a chain of descriptors. This
624 * function returns the next descriptor in the chain, or vq->vring.num if we're 537 * function returns the next descriptor in the chain, or vq->vring.num if we're
625 * at the end. */ 538 * at the end. */
626static unsigned next_desc(struct virtqueue *vq, unsigned int i) 539static unsigned next_desc(struct vring_desc *desc,
540 unsigned int i, unsigned int max)
627{ 541{
628 unsigned int next; 542 unsigned int next;
629 543
630 /* If this descriptor says it doesn't chain, we're done. */ 544 /* If this descriptor says it doesn't chain, we're done. */
631 if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT)) 545 if (!(desc[i].flags & VRING_DESC_F_NEXT))
632 return vq->vring.num; 546 return max;
633 547
634 /* Check they're not leading us off end of descriptors. */ 548 /* Check they're not leading us off end of descriptors. */
635 next = vq->vring.desc[i].next; 549 next = desc[i].next;
636 /* Make sure compiler knows to grab that: we don't want it changing! */ 550 /* Make sure compiler knows to grab that: we don't want it changing! */
637 wmb(); 551 wmb();
638 552
639 if (next >= vq->vring.num) 553 if (next >= max)
640 errx(1, "Desc next is %u", next); 554 errx(1, "Desc next is %u", next);
641 555
642 return next; 556 return next;
643} 557}
644 558
559/* This actually sends the interrupt for this virtqueue */
560static void trigger_irq(struct virtqueue *vq)
561{
562 unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
563
564 /* Don't inform them if nothing used. */
565 if (!vq->pending_used)
566 return;
567 vq->pending_used = 0;
568
569 /* If they don't want an interrupt, don't send one, unless empty. */
570 if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
571 && lg_last_avail(vq) != vq->vring.avail->idx)
572 return;
573
574 /* Send the Guest an interrupt tell them we used something up. */
575 if (write(lguest_fd, buf, sizeof(buf)) != 0)
576 err(1, "Triggering irq %i", vq->config.irq);
577}
578
645/* This looks in the virtqueue and for the first available buffer, and converts 579/* This looks in the virtqueue and for the first available buffer, and converts
646 * it to an iovec for convenient access. Since descriptors consist of some 580 * it to an iovec for convenient access. Since descriptors consist of some
647 * number of output then some number of input descriptors, it's actually two 581 * number of output then some number of input descriptors, it's actually two
648 * iovecs, but we pack them into one and note how many of each there were. 582 * iovecs, but we pack them into one and note how many of each there were.
649 * 583 *
650 * This function returns the descriptor number found, or vq->vring.num (which 584 * This function returns the descriptor number found. */
651 * is never a valid descriptor number) if none was found. */ 585static unsigned wait_for_vq_desc(struct virtqueue *vq,
652static unsigned get_vq_desc(struct virtqueue *vq, 586 struct iovec iov[],
653 struct iovec iov[], 587 unsigned int *out_num, unsigned int *in_num)
654 unsigned int *out_num, unsigned int *in_num)
655{ 588{
656 unsigned int i, head; 589 unsigned int i, head, max;
657 u16 last_avail; 590 struct vring_desc *desc;
591 u16 last_avail = lg_last_avail(vq);
592
593 while (last_avail == vq->vring.avail->idx) {
594 u64 event;
595
596 /* OK, tell Guest about progress up to now. */
597 trigger_irq(vq);
598
599 /* OK, now we need to know about added descriptors. */
600 vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
601
602 /* They could have slipped one in as we were doing that: make
603 * sure it's written, then check again. */
604 mb();
605 if (last_avail != vq->vring.avail->idx) {
606 vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
607 break;
608 }
609
610 /* Nothing new? Wait for eventfd to tell us they refilled. */
611 if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
612 errx(1, "Event read failed?");
613
614 /* We don't need to be notified again. */
615 vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
616 }
658 617
659 /* Check it isn't doing very strange things with descriptor numbers. */ 618 /* Check it isn't doing very strange things with descriptor numbers. */
660 last_avail = lg_last_avail(vq);
661 if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num) 619 if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
662 errx(1, "Guest moved used index from %u to %u", 620 errx(1, "Guest moved used index from %u to %u",
663 last_avail, vq->vring.avail->idx); 621 last_avail, vq->vring.avail->idx);
664 622
665 /* If there's nothing new since last we looked, return invalid. */
666 if (vq->vring.avail->idx == last_avail)
667 return vq->vring.num;
668
669 /* Grab the next descriptor number they're advertising, and increment 623 /* Grab the next descriptor number they're advertising, and increment
670 * the index we've seen. */ 624 * the index we've seen. */
671 head = vq->vring.avail->ring[last_avail % vq->vring.num]; 625 head = vq->vring.avail->ring[last_avail % vq->vring.num];
@@ -678,15 +632,28 @@ static unsigned get_vq_desc(struct virtqueue *vq,
678 /* When we start there are none of either input nor output. */ 632 /* When we start there are none of either input nor output. */
679 *out_num = *in_num = 0; 633 *out_num = *in_num = 0;
680 634
635 max = vq->vring.num;
636 desc = vq->vring.desc;
681 i = head; 637 i = head;
638
639 /* If this is an indirect entry, then this buffer contains a descriptor
640 * table which we handle as if it's any normal descriptor chain. */
641 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
642 if (desc[i].len % sizeof(struct vring_desc))
643 errx(1, "Invalid size for indirect buffer table");
644
645 max = desc[i].len / sizeof(struct vring_desc);
646 desc = check_pointer(desc[i].addr, desc[i].len);
647 i = 0;
648 }
649
682 do { 650 do {
683 /* Grab the first descriptor, and check it's OK. */ 651 /* Grab the first descriptor, and check it's OK. */
684 iov[*out_num + *in_num].iov_len = vq->vring.desc[i].len; 652 iov[*out_num + *in_num].iov_len = desc[i].len;
685 iov[*out_num + *in_num].iov_base 653 iov[*out_num + *in_num].iov_base
686 = check_pointer(vq->vring.desc[i].addr, 654 = check_pointer(desc[i].addr, desc[i].len);
687 vq->vring.desc[i].len);
688 /* If this is an input descriptor, increment that count. */ 655 /* If this is an input descriptor, increment that count. */
689 if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) 656 if (desc[i].flags & VRING_DESC_F_WRITE)
690 (*in_num)++; 657 (*in_num)++;
691 else { 658 else {
692 /* If it's an output descriptor, they're all supposed 659 /* If it's an output descriptor, they're all supposed
@@ -697,11 +664,10 @@ static unsigned get_vq_desc(struct virtqueue *vq,
697 } 664 }
698 665
699 /* If we've got too many, that implies a descriptor loop. */ 666 /* If we've got too many, that implies a descriptor loop. */
700 if (*out_num + *in_num > vq->vring.num) 667 if (*out_num + *in_num > max)
701 errx(1, "Looped descriptor"); 668 errx(1, "Looped descriptor");
702 } while ((i = next_desc(vq, i)) != vq->vring.num); 669 } while ((i = next_desc(desc, i, max)) != max);
703 670
704 vq->inflight++;
705 return head; 671 return head;
706} 672}
707 673
@@ -719,44 +685,20 @@ static void add_used(struct virtqueue *vq, unsigned int head, int len)
719 /* Make sure buffer is written before we update index. */ 685 /* Make sure buffer is written before we update index. */
720 wmb(); 686 wmb();
721 vq->vring.used->idx++; 687 vq->vring.used->idx++;
722 vq->inflight--; 688 vq->pending_used++;
723}
724
725/* This actually sends the interrupt for this virtqueue */
726static void trigger_irq(int fd, struct virtqueue *vq)
727{
728 unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
729
730 /* If they don't want an interrupt, don't send one, unless empty. */
731 if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
732 && vq->inflight)
733 return;
734
735 /* Send the Guest an interrupt tell them we used something up. */
736 if (write(fd, buf, sizeof(buf)) != 0)
737 err(1, "Triggering irq %i", vq->config.irq);
738} 689}
739 690
740/* And here's the combo meal deal. Supersize me! */ 691/* And here's the combo meal deal. Supersize me! */
741static void add_used_and_trigger(int fd, struct virtqueue *vq, 692static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
742 unsigned int head, int len)
743{ 693{
744 add_used(vq, head, len); 694 add_used(vq, head, len);
745 trigger_irq(fd, vq); 695 trigger_irq(vq);
746} 696}
747 697
748/* 698/*
749 * The Console 699 * The Console
750 * 700 *
751 * Here is the input terminal setting we save, and the routine to restore them 701 * We associate some data with the console for our exit hack. */
752 * on exit so the user gets their terminal back. */
753static struct termios orig_term;
754static void restore_term(void)
755{
756 tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
757}
758
759/* We associate some data with the console for our exit hack. */
760struct console_abort 702struct console_abort
761{ 703{
762 /* How many times have they hit ^C? */ 704 /* How many times have they hit ^C? */
@@ -766,276 +708,275 @@ struct console_abort
766}; 708};
767 709
768/* This is the routine which handles console input (ie. stdin). */ 710/* This is the routine which handles console input (ie. stdin). */
769static bool handle_console_input(int fd, struct device *dev) 711static void console_input(struct virtqueue *vq)
770{ 712{
771 int len; 713 int len;
772 unsigned int head, in_num, out_num; 714 unsigned int head, in_num, out_num;
773 struct iovec iov[dev->vq->vring.num]; 715 struct console_abort *abort = vq->dev->priv;
774 struct console_abort *abort = dev->priv; 716 struct iovec iov[vq->vring.num];
775
776 /* First we need a console buffer from the Guests's input virtqueue. */
777 head = get_vq_desc(dev->vq, iov, &out_num, &in_num);
778
779 /* If they're not ready for input, stop listening to this file
780 * descriptor. We'll start again once they add an input buffer. */
781 if (head == dev->vq->vring.num)
782 return false;
783 717
718 /* Make sure there's a descriptor waiting. */
719 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
784 if (out_num) 720 if (out_num)
785 errx(1, "Output buffers in console in queue?"); 721 errx(1, "Output buffers in console in queue?");
786 722
787 /* This is why we convert to iovecs: the readv() call uses them, and so 723 /* Read it in. */
788 * it reads straight into the Guest's buffer. */ 724 len = readv(STDIN_FILENO, iov, in_num);
789 len = readv(dev->fd, iov, in_num);
790 if (len <= 0) { 725 if (len <= 0) {
791 /* This implies that the console is closed, is /dev/null, or 726 /* Ran out of input? */
792 * something went terribly wrong. */
793 warnx("Failed to get console input, ignoring console."); 727 warnx("Failed to get console input, ignoring console.");
794 /* Put the input terminal back. */ 728 /* For simplicity, dying threads kill the whole Launcher. So
795 restore_term(); 729 * just nap here. */
796 /* Remove callback from input vq, so it doesn't restart us. */ 730 for (;;)
797 dev->vq->handle_output = NULL; 731 pause();
798 /* Stop listening to this fd: don't call us again. */
799 return false;
800 } 732 }
801 733
802 /* Tell the Guest about the new input. */ 734 add_used_and_trigger(vq, head, len);
803 add_used_and_trigger(fd, dev->vq, head, len);
804 735
805 /* Three ^C within one second? Exit. 736 /* Three ^C within one second? Exit.
806 * 737 *
807 * This is such a hack, but works surprisingly well. Each ^C has to be 738 * This is such a hack, but works surprisingly well. Each ^C has to
808 * in a buffer by itself, so they can't be too fast. But we check that 739 * be in a buffer by itself, so they can't be too fast. But we check
809 * we get three within about a second, so they can't be too slow. */ 740 * that we get three within about a second, so they can't be too
810 if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) { 741 * slow. */
811 if (!abort->count++) 742 if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
812 gettimeofday(&abort->start, NULL);
813 else if (abort->count == 3) {
814 struct timeval now;
815 gettimeofday(&now, NULL);
816 if (now.tv_sec <= abort->start.tv_sec+1) {
817 unsigned long args[] = { LHREQ_BREAK, 0 };
818 /* Close the fd so Waker will know it has to
819 * exit. */
820 close(waker_fds.pipe[1]);
821 /* Just in case Waker is blocked in BREAK, send
822 * unbreak now. */
823 write(fd, args, sizeof(args));
824 exit(2);
825 }
826 abort->count = 0;
827 }
828 } else
829 /* Any other key resets the abort counter. */
830 abort->count = 0; 743 abort->count = 0;
744 return;
745 }
831 746
832 /* Everything went OK! */ 747 abort->count++;
833 return true; 748 if (abort->count == 1)
749 gettimeofday(&abort->start, NULL);
750 else if (abort->count == 3) {
751 struct timeval now;
752 gettimeofday(&now, NULL);
753 /* Kill all Launcher processes with SIGINT, like normal ^C */
754 if (now.tv_sec <= abort->start.tv_sec+1)
755 kill(0, SIGINT);
756 abort->count = 0;
757 }
834} 758}
835 759
836/* Handling output for console is simple: we just get all the output buffers 760/* This is the routine which handles console output (ie. stdout). */
837 * and write them to stdout. */ 761static void console_output(struct virtqueue *vq)
838static void handle_console_output(int fd, struct virtqueue *vq, bool timeout)
839{ 762{
840 unsigned int head, out, in; 763 unsigned int head, out, in;
841 int len;
842 struct iovec iov[vq->vring.num]; 764 struct iovec iov[vq->vring.num];
843 765
844 /* Keep getting output buffers from the Guest until we run out. */ 766 head = wait_for_vq_desc(vq, iov, &out, &in);
845 while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { 767 if (in)
846 if (in) 768 errx(1, "Input buffers in console output queue?");
847 errx(1, "Input buffers in output queue?"); 769 while (!iov_empty(iov, out)) {
848 len = writev(STDOUT_FILENO, iov, out); 770 int len = writev(STDOUT_FILENO, iov, out);
849 add_used_and_trigger(fd, vq, head, len); 771 if (len <= 0)
772 err(1, "Write to stdout gave %i", len);
773 iov_consume(iov, out, len);
850 } 774 }
851} 775 add_used(vq, head, 0);
852
853/* This is called when we no longer want to hear about Guest changes to a
854 * virtqueue. This is more efficient in high-traffic cases, but it means we
855 * have to set a timer to check if any more changes have occurred. */
856static void block_vq(struct virtqueue *vq)
857{
858 struct itimerval itm;
859
860 vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
861 vq->blocked = true;
862
863 itm.it_interval.tv_sec = 0;
864 itm.it_interval.tv_usec = 0;
865 itm.it_value.tv_sec = 0;
866 itm.it_value.tv_usec = timeout_usec;
867
868 setitimer(ITIMER_REAL, &itm, NULL);
869} 776}
870 777
871/* 778/*
872 * The Network 779 * The Network
873 * 780 *
874 * Handling output for network is also simple: we get all the output buffers 781 * Handling output for network is also simple: we get all the output buffers
875 * and write them (ignoring the first element) to this device's file descriptor 782 * and write them to /dev/net/tun.
876 * (/dev/net/tun).
877 */ 783 */
878static void handle_net_output(int fd, struct virtqueue *vq, bool timeout) 784struct net_info {
785 int tunfd;
786};
787
788static void net_output(struct virtqueue *vq)
879{ 789{
880 unsigned int head, out, in, num = 0; 790 struct net_info *net_info = vq->dev->priv;
881 int len; 791 unsigned int head, out, in;
882 struct iovec iov[vq->vring.num]; 792 struct iovec iov[vq->vring.num];
883 static int last_timeout_num;
884
885 /* Keep getting output buffers from the Guest until we run out. */
886 while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) {
887 if (in)
888 errx(1, "Input buffers in output queue?");
889 len = writev(vq->dev->fd, iov, out);
890 if (len < 0)
891 err(1, "Writing network packet to tun");
892 add_used_and_trigger(fd, vq, head, len);
893 num++;
894 }
895 793
896 /* Block further kicks and set up a timer if we saw anything. */ 794 head = wait_for_vq_desc(vq, iov, &out, &in);
897 if (!timeout && num) 795 if (in)
898 block_vq(vq); 796 errx(1, "Input buffers in net output queue?");
899 797 if (writev(net_info->tunfd, iov, out) < 0)
900 /* We never quite know how long should we wait before we check the 798 errx(1, "Write to tun failed?");
901 * queue again for more packets. We start at 500 microseconds, and if 799 add_used(vq, head, 0);
902 * we get fewer packets than last time, we assume we made the timeout 800}
903 * too small and increase it by 10 microseconds. Otherwise, we drop it 801
904 * by one microsecond every time. It seems to work well enough. */ 802/* Will reading from this file descriptor block? */
905 if (timeout) { 803static bool will_block(int fd)
906 if (num < last_timeout_num) 804{
907 timeout_usec += 10; 805 fd_set fdset;
908 else if (timeout_usec > 1) 806 struct timeval zero = { 0, 0 };
909 timeout_usec--; 807 FD_ZERO(&fdset);
910 last_timeout_num = num; 808 FD_SET(fd, &fdset);
911 } 809 return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
912} 810}
913 811
914/* This is where we handle a packet coming in from the tun device to our 812/* This is where we handle packets coming in from the tun device to our
915 * Guest. */ 813 * Guest. */
916static bool handle_tun_input(int fd, struct device *dev) 814static void net_input(struct virtqueue *vq)
917{ 815{
918 unsigned int head, in_num, out_num;
919 int len; 816 int len;
920 struct iovec iov[dev->vq->vring.num]; 817 unsigned int head, out, in;
921 818 struct iovec iov[vq->vring.num];
922 /* First we need a network buffer from the Guests's recv virtqueue. */ 819 struct net_info *net_info = vq->dev->priv;
923 head = get_vq_desc(dev->vq, iov, &out_num, &in_num);
924 if (head == dev->vq->vring.num) {
925 /* Now, it's expected that if we try to send a packet too
926 * early, the Guest won't be ready yet. Wait until the device
927 * status says it's ready. */
928 /* FIXME: Actually want DRIVER_ACTIVE here. */
929
930 /* Now tell it we want to know if new things appear. */
931 dev->vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
932 wmb();
933
934 /* We'll turn this back on if input buffers are registered. */
935 return false;
936 } else if (out_num)
937 errx(1, "Output buffers in network recv queue?");
938
939 /* Read the packet from the device directly into the Guest's buffer. */
940 len = readv(dev->fd, iov, in_num);
941 if (len <= 0)
942 err(1, "reading network");
943 820
944 /* Tell the Guest about the new packet. */ 821 head = wait_for_vq_desc(vq, iov, &out, &in);
945 add_used_and_trigger(fd, dev->vq, head, len); 822 if (out)
823 errx(1, "Output buffers in net input queue?");
946 824
947 verbose("tun input packet len %i [%02x %02x] (%s)\n", len, 825 /* Deliver interrupt now, since we're about to sleep. */
948 ((u8 *)iov[1].iov_base)[0], ((u8 *)iov[1].iov_base)[1], 826 if (vq->pending_used && will_block(net_info->tunfd))
949 head != dev->vq->vring.num ? "sent" : "discarded"); 827 trigger_irq(vq);
950 828
951 /* All good. */ 829 len = readv(net_info->tunfd, iov, in);
952 return true; 830 if (len <= 0)
831 err(1, "Failed to read from tun.");
832 add_used(vq, head, len);
953} 833}
954 834
955/*L:215 This is the callback attached to the network and console input 835/* This is the helper to create threads. */
956 * virtqueues: it ensures we try again, in case we stopped console or net 836static int do_thread(void *_vq)
957 * delivery because Guest didn't have any buffers. */
958static void enable_fd(int fd, struct virtqueue *vq, bool timeout)
959{ 837{
960 add_device_fd(vq->dev->fd); 838 struct virtqueue *vq = _vq;
961 /* Snap the Waker out of its select loop. */ 839
962 write(waker_fds.pipe[1], "", 1); 840 for (;;)
841 vq->service(vq);
842 return 0;
963} 843}
964 844
965static void net_enable_fd(int fd, struct virtqueue *vq, bool timeout) 845/* When a child dies, we kill our entire process group with SIGTERM. This
846 * also has the side effect that the shell restores the console for us! */
847static void kill_launcher(int signal)
966{ 848{
967 /* We don't need to know again when Guest refills receive buffer. */ 849 kill(0, SIGTERM);
968 vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
969 enable_fd(fd, vq, timeout);
970} 850}
971 851
972/* When the Guest tells us they updated the status field, we handle it. */ 852static void reset_device(struct device *dev)
973static void update_device_status(struct device *dev)
974{ 853{
975 struct virtqueue *vq; 854 struct virtqueue *vq;
976 855
977 /* This is a reset. */ 856 verbose("Resetting device %s\n", dev->name);
978 if (dev->desc->status == 0) {
979 verbose("Resetting device %s\n", dev->name);
980 857
981 /* Clear any features they've acked. */ 858 /* Clear any features they've acked. */
982 memset(get_feature_bits(dev) + dev->desc->feature_len, 0, 859 memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);
983 dev->desc->feature_len);
984 860
985 /* Zero out the virtqueues. */ 861 /* We're going to be explicitly killing threads, so ignore them. */
986 for (vq = dev->vq; vq; vq = vq->next) { 862 signal(SIGCHLD, SIG_IGN);
987 memset(vq->vring.desc, 0, 863
988 vring_size(vq->config.num, LGUEST_VRING_ALIGN)); 864 /* Zero out the virtqueues, get rid of their threads */
989 lg_last_avail(vq) = 0; 865 for (vq = dev->vq; vq; vq = vq->next) {
866 if (vq->thread != (pid_t)-1) {
867 kill(vq->thread, SIGTERM);
868 waitpid(vq->thread, NULL, 0);
869 vq->thread = (pid_t)-1;
990 } 870 }
991 } else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { 871 memset(vq->vring.desc, 0,
872 vring_size(vq->config.num, LGUEST_VRING_ALIGN));
873 lg_last_avail(vq) = 0;
874 }
875 dev->running = false;
876
877 /* Now we care if threads die. */
878 signal(SIGCHLD, (void *)kill_launcher);
879}
880
881static void create_thread(struct virtqueue *vq)
882{
883 /* Create stack for thread and run it. Since stack grows
884 * upwards, we point the stack pointer to the end of this
885 * region. */
886 char *stack = malloc(32768);
887 unsigned long args[] = { LHREQ_EVENTFD,
888 vq->config.pfn*getpagesize(), 0 };
889
890 /* Create a zero-initialized eventfd. */
891 vq->eventfd = eventfd(0, 0);
892 if (vq->eventfd < 0)
893 err(1, "Creating eventfd");
894 args[2] = vq->eventfd;
895
896 /* Attach an eventfd to this virtqueue: it will go off
897 * when the Guest does an LHCALL_NOTIFY for this vq. */
898 if (write(lguest_fd, &args, sizeof(args)) != 0)
899 err(1, "Attaching eventfd");
900
901 /* CLONE_VM: because it has to access the Guest memory, and
902 * SIGCHLD so we get a signal if it dies. */
903 vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
904 if (vq->thread == (pid_t)-1)
905 err(1, "Creating clone");
906 /* We close our local copy, now the child has it. */
907 close(vq->eventfd);
908}
909
910static void start_device(struct device *dev)
911{
912 unsigned int i;
913 struct virtqueue *vq;
914
915 verbose("Device %s OK: offered", dev->name);
916 for (i = 0; i < dev->feature_len; i++)
917 verbose(" %02x", get_feature_bits(dev)[i]);
918 verbose(", accepted");
919 for (i = 0; i < dev->feature_len; i++)
920 verbose(" %02x", get_feature_bits(dev)
921 [dev->feature_len+i]);
922
923 for (vq = dev->vq; vq; vq = vq->next) {
924 if (vq->service)
925 create_thread(vq);
926 }
927 dev->running = true;
928}
929
930static void cleanup_devices(void)
931{
932 struct device *dev;
933
934 for (dev = devices.dev; dev; dev = dev->next)
935 reset_device(dev);
936
937 /* If we saved off the original terminal settings, restore them now. */
938 if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
939 tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
940}
941
942/* When the Guest tells us they updated the status field, we handle it. */
943static void update_device_status(struct device *dev)
944{
945 /* A zero status is a reset, otherwise it's a set of flags. */
946 if (dev->desc->status == 0)
947 reset_device(dev);
948 else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
992 warnx("Device %s configuration FAILED", dev->name); 949 warnx("Device %s configuration FAILED", dev->name);
950 if (dev->running)
951 reset_device(dev);
993 } else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) { 952 } else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) {
994 unsigned int i; 953 if (!dev->running)
995 954 start_device(dev);
996 verbose("Device %s OK: offered", dev->name);
997 for (i = 0; i < dev->desc->feature_len; i++)
998 verbose(" %02x", get_feature_bits(dev)[i]);
999 verbose(", accepted");
1000 for (i = 0; i < dev->desc->feature_len; i++)
1001 verbose(" %02x", get_feature_bits(dev)
1002 [dev->desc->feature_len+i]);
1003
1004 if (dev->ready)
1005 dev->ready(dev);
1006 } 955 }
1007} 956}
1008 957
1009/* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ 958/* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */
1010static void handle_output(int fd, unsigned long addr) 959static void handle_output(unsigned long addr)
1011{ 960{
1012 struct device *i; 961 struct device *i;
1013 struct virtqueue *vq;
1014 962
1015 /* Check each device and virtqueue. */ 963 /* Check each device. */
1016 for (i = devices.dev; i; i = i->next) { 964 for (i = devices.dev; i; i = i->next) {
965 struct virtqueue *vq;
966
1017 /* Notifications to device descriptors update device status. */ 967 /* Notifications to device descriptors update device status. */
1018 if (from_guest_phys(addr) == i->desc) { 968 if (from_guest_phys(addr) == i->desc) {
1019 update_device_status(i); 969 update_device_status(i);
1020 return; 970 return;
1021 } 971 }
1022 972
1023 /* Notifications to virtqueues mean output has occurred. */ 973 /* Devices *can* be used before status is set to DRIVER_OK. */
1024 for (vq = i->vq; vq; vq = vq->next) { 974 for (vq = i->vq; vq; vq = vq->next) {
1025 if (vq->config.pfn != addr/getpagesize()) 975 if (addr != vq->config.pfn*getpagesize())
1026 continue; 976 continue;
1027 977 if (i->running)
1028 /* Guest should acknowledge (and set features!) before 978 errx(1, "Notification on running %s", i->name);
1029 * using the device. */ 979 start_device(i);
1030 if (i->desc->status == 0) {
1031 warnx("%s gave early output", i->name);
1032 return;
1033 }
1034
1035 if (strcmp(vq->dev->name, "console") != 0)
1036 verbose("Output to %s\n", vq->dev->name);
1037 if (vq->handle_output)
1038 vq->handle_output(fd, vq, false);
1039 return; 980 return;
1040 } 981 }
1041 } 982 }
@@ -1049,71 +990,6 @@ static void handle_output(int fd, unsigned long addr)
1049 strnlen(from_guest_phys(addr), guest_limit - addr)); 990 strnlen(from_guest_phys(addr), guest_limit - addr));
1050} 991}
1051 992
1052static void handle_timeout(int fd)
1053{
1054 char buf[32];
1055 struct device *i;
1056 struct virtqueue *vq;
1057
1058 /* Clear the pipe */
1059 read(timeoutpipe[0], buf, sizeof(buf));
1060
1061 /* Check each device and virtqueue: flush blocked ones. */
1062 for (i = devices.dev; i; i = i->next) {
1063 for (vq = i->vq; vq; vq = vq->next) {
1064 if (!vq->blocked)
1065 continue;
1066
1067 vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
1068 vq->blocked = false;
1069 if (vq->handle_output)
1070 vq->handle_output(fd, vq, true);
1071 }
1072 }
1073}
1074
1075/* This is called when the Waker wakes us up: check for incoming file
1076 * descriptors. */
1077static void handle_input(int fd)
1078{
1079 /* select() wants a zeroed timeval to mean "don't wait". */
1080 struct timeval poll = { .tv_sec = 0, .tv_usec = 0 };
1081
1082 for (;;) {
1083 struct device *i;
1084 fd_set fds = devices.infds;
1085 int num;
1086
1087 num = select(devices.max_infd+1, &fds, NULL, NULL, &poll);
1088 /* Could get interrupted */
1089 if (num < 0)
1090 continue;
1091 /* If nothing is ready, we're done. */
1092 if (num == 0)
1093 break;
1094
1095 /* Otherwise, call the device(s) which have readable file
1096 * descriptors and a method of handling them. */
1097 for (i = devices.dev; i; i = i->next) {
1098 if (i->handle_input && FD_ISSET(i->fd, &fds)) {
1099 if (i->handle_input(fd, i))
1100 continue;
1101
1102 /* If handle_input() returns false, it means we
1103 * should no longer service it. Networking and
1104 * console do this when there's no input
1105 * buffers to deliver into. Console also uses
1106 * it when it discovers that stdin is closed. */
1107 FD_CLR(i->fd, &devices.infds);
1108 }
1109 }
1110
1111 /* Is this the timeout fd? */
1112 if (FD_ISSET(timeoutpipe[0], &fds))
1113 handle_timeout(fd);
1114 }
1115}
1116
1117/*L:190 993/*L:190
1118 * Device Setup 994 * Device Setup
1119 * 995 *
@@ -1129,8 +1005,8 @@ static void handle_input(int fd)
1129static u8 *device_config(const struct device *dev) 1005static u8 *device_config(const struct device *dev)
1130{ 1006{
1131 return (void *)(dev->desc + 1) 1007 return (void *)(dev->desc + 1)
1132 + dev->desc->num_vq * sizeof(struct lguest_vqconfig) 1008 + dev->num_vq * sizeof(struct lguest_vqconfig)
1133 + dev->desc->feature_len * 2; 1009 + dev->feature_len * 2;
1134} 1010}
1135 1011
1136/* This routine allocates a new "struct lguest_device_desc" from descriptor 1012/* This routine allocates a new "struct lguest_device_desc" from descriptor
@@ -1159,7 +1035,7 @@ static struct lguest_device_desc *new_dev_desc(u16 type)
1159/* Each device descriptor is followed by the description of its virtqueues. We 1035/* Each device descriptor is followed by the description of its virtqueues. We
1160 * specify how many descriptors the virtqueue is to have. */ 1036 * specify how many descriptors the virtqueue is to have. */
1161static void add_virtqueue(struct device *dev, unsigned int num_descs, 1037static void add_virtqueue(struct device *dev, unsigned int num_descs,
1162 void (*handle_output)(int, struct virtqueue *, bool)) 1038 void (*service)(struct virtqueue *))
1163{ 1039{
1164 unsigned int pages; 1040 unsigned int pages;
1165 struct virtqueue **i, *vq = malloc(sizeof(*vq)); 1041 struct virtqueue **i, *vq = malloc(sizeof(*vq));
@@ -1174,8 +1050,8 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
1174 vq->next = NULL; 1050 vq->next = NULL;
1175 vq->last_avail_idx = 0; 1051 vq->last_avail_idx = 0;
1176 vq->dev = dev; 1052 vq->dev = dev;
1177 vq->inflight = 0; 1053 vq->service = service;
1178 vq->blocked = false; 1054 vq->thread = (pid_t)-1;
1179 1055
1180 /* Initialize the configuration. */ 1056 /* Initialize the configuration. */
1181 vq->config.num = num_descs; 1057 vq->config.num = num_descs;
@@ -1191,6 +1067,7 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
1191 * yet, otherwise we'd be overwriting them. */ 1067 * yet, otherwise we'd be overwriting them. */
1192 assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); 1068 assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
1193 memcpy(device_config(dev), &vq->config, sizeof(vq->config)); 1069 memcpy(device_config(dev), &vq->config, sizeof(vq->config));
1070 dev->num_vq++;
1194 dev->desc->num_vq++; 1071 dev->desc->num_vq++;
1195 1072
1196 verbose("Virtqueue page %#lx\n", to_guest_phys(p)); 1073 verbose("Virtqueue page %#lx\n", to_guest_phys(p));
@@ -1199,15 +1076,6 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
1199 * second. */ 1076 * second. */
1200 for (i = &dev->vq; *i; i = &(*i)->next); 1077 for (i = &dev->vq; *i; i = &(*i)->next);
1201 *i = vq; 1078 *i = vq;
1202
1203 /* Set the routine to call when the Guest does something to this
1204 * virtqueue. */
1205 vq->handle_output = handle_output;
1206
1207 /* As an optimization, set the advisory "Don't Notify Me" flag if we
1208 * don't have a handler */
1209 if (!handle_output)
1210 vq->vring.used->flags = VRING_USED_F_NO_NOTIFY;
1211} 1079}
1212 1080
1213/* The first half of the feature bitmask is for us to advertise features. The 1081/* The first half of the feature bitmask is for us to advertise features. The
@@ -1219,7 +1087,7 @@ static void add_feature(struct device *dev, unsigned bit)
1219 /* We can't extend the feature bits once we've added config bytes */ 1087 /* We can't extend the feature bits once we've added config bytes */
1220 if (dev->desc->feature_len <= bit / CHAR_BIT) { 1088 if (dev->desc->feature_len <= bit / CHAR_BIT) {
1221 assert(dev->desc->config_len == 0); 1089 assert(dev->desc->config_len == 0);
1222 dev->desc->feature_len = (bit / CHAR_BIT) + 1; 1090 dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
1223 } 1091 }
1224 1092
1225 features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); 1093 features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
@@ -1243,22 +1111,17 @@ static void set_config(struct device *dev, unsigned len, const void *conf)
1243 * calling new_dev_desc() to allocate the descriptor and device memory. 1111 * calling new_dev_desc() to allocate the descriptor and device memory.
1244 * 1112 *
1245 * See what I mean about userspace being boring? */ 1113 * See what I mean about userspace being boring? */
1246static struct device *new_device(const char *name, u16 type, int fd, 1114static struct device *new_device(const char *name, u16 type)
1247 bool (*handle_input)(int, struct device *))
1248{ 1115{
1249 struct device *dev = malloc(sizeof(*dev)); 1116 struct device *dev = malloc(sizeof(*dev));
1250 1117
1251 /* Now we populate the fields one at a time. */ 1118 /* Now we populate the fields one at a time. */
1252 dev->fd = fd;
1253 /* If we have an input handler for this file descriptor, then we add it
1254 * to the device_list's fdset and maxfd. */
1255 if (handle_input)
1256 add_device_fd(dev->fd);
1257 dev->desc = new_dev_desc(type); 1119 dev->desc = new_dev_desc(type);
1258 dev->handle_input = handle_input;
1259 dev->name = name; 1120 dev->name = name;
1260 dev->vq = NULL; 1121 dev->vq = NULL;
1261 dev->ready = NULL; 1122 dev->feature_len = 0;
1123 dev->num_vq = 0;
1124 dev->running = false;
1262 1125
1263 /* Append to device list. Prepending to a single-linked list is 1126 /* Append to device list. Prepending to a single-linked list is
1264 * easier, but the user expects the devices to be arranged on the bus 1127 * easier, but the user expects the devices to be arranged on the bus
@@ -1286,13 +1149,10 @@ static void setup_console(void)
1286 * raw input stream to the Guest. */ 1149 * raw input stream to the Guest. */
1287 term.c_lflag &= ~(ISIG|ICANON|ECHO); 1150 term.c_lflag &= ~(ISIG|ICANON|ECHO);
1288 tcsetattr(STDIN_FILENO, TCSANOW, &term); 1151 tcsetattr(STDIN_FILENO, TCSANOW, &term);
1289 /* If we exit gracefully, the original settings will be
1290 * restored so the user can see what they're typing. */
1291 atexit(restore_term);
1292 } 1152 }
1293 1153
1294 dev = new_device("console", VIRTIO_ID_CONSOLE, 1154 dev = new_device("console", VIRTIO_ID_CONSOLE);
1295 STDIN_FILENO, handle_console_input); 1155
1296 /* We store the console state in dev->priv, and initialize it. */ 1156 /* We store the console state in dev->priv, and initialize it. */
1297 dev->priv = malloc(sizeof(struct console_abort)); 1157 dev->priv = malloc(sizeof(struct console_abort));
1298 ((struct console_abort *)dev->priv)->count = 0; 1158 ((struct console_abort *)dev->priv)->count = 0;
@@ -1301,31 +1161,13 @@ static void setup_console(void)
1301 * they put something the input queue, we make sure we're listening to 1161 * they put something the input queue, we make sure we're listening to
1302 * stdin. When they put something in the output queue, we write it to 1162 * stdin. When they put something in the output queue, we write it to
1303 * stdout. */ 1163 * stdout. */
1304 add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); 1164 add_virtqueue(dev, VIRTQUEUE_NUM, console_input);
1305 add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); 1165 add_virtqueue(dev, VIRTQUEUE_NUM, console_output);
1306 1166
1307 verbose("device %u: console\n", devices.device_num++); 1167 verbose("device %u: console\n", ++devices.device_num);
1308} 1168}
1309/*:*/ 1169/*:*/
1310 1170
1311static void timeout_alarm(int sig)
1312{
1313 write(timeoutpipe[1], "", 1);
1314}
1315
1316static void setup_timeout(void)
1317{
1318 if (pipe(timeoutpipe) != 0)
1319 err(1, "Creating timeout pipe");
1320
1321 if (fcntl(timeoutpipe[1], F_SETFL,
1322 fcntl(timeoutpipe[1], F_GETFL) | O_NONBLOCK) != 0)
1323 err(1, "Making timeout pipe nonblocking");
1324
1325 add_device_fd(timeoutpipe[0]);
1326 signal(SIGALRM, timeout_alarm);
1327}
1328
1329/*M:010 Inter-guest networking is an interesting area. Simplest is to have a 1171/*M:010 Inter-guest networking is an interesting area. Simplest is to have a
1330 * --sharenet=<name> option which opens or creates a named pipe. This can be 1172 * --sharenet=<name> option which opens or creates a named pipe. This can be
1331 * used to send packets to another guest in a 1:1 manner. 1173 * used to send packets to another guest in a 1:1 manner.
@@ -1447,21 +1289,23 @@ static int get_tun_device(char tapif[IFNAMSIZ])
1447static void setup_tun_net(char *arg) 1289static void setup_tun_net(char *arg)
1448{ 1290{
1449 struct device *dev; 1291 struct device *dev;
1450 int netfd, ipfd; 1292 struct net_info *net_info = malloc(sizeof(*net_info));
1293 int ipfd;
1451 u32 ip = INADDR_ANY; 1294 u32 ip = INADDR_ANY;
1452 bool bridging = false; 1295 bool bridging = false;
1453 char tapif[IFNAMSIZ], *p; 1296 char tapif[IFNAMSIZ], *p;
1454 struct virtio_net_config conf; 1297 struct virtio_net_config conf;
1455 1298
1456 netfd = get_tun_device(tapif); 1299 net_info->tunfd = get_tun_device(tapif);
1457 1300
1458 /* First we create a new network device. */ 1301 /* First we create a new network device. */
1459 dev = new_device("net", VIRTIO_ID_NET, netfd, handle_tun_input); 1302 dev = new_device("net", VIRTIO_ID_NET);
1303 dev->priv = net_info;
1460 1304
1461 /* Network devices need a receive and a send queue, just like 1305 /* Network devices need a receive and a send queue, just like
1462 * console. */ 1306 * console. */
1463 add_virtqueue(dev, VIRTQUEUE_NUM, net_enable_fd); 1307 add_virtqueue(dev, VIRTQUEUE_NUM, net_input);
1464 add_virtqueue(dev, VIRTQUEUE_NUM, handle_net_output); 1308 add_virtqueue(dev, VIRTQUEUE_NUM, net_output);
1465 1309
1466 /* We need a socket to perform the magic network ioctls to bring up the 1310 /* We need a socket to perform the magic network ioctls to bring up the
1467 * tap interface, connect to the bridge etc. Any socket will do! */ 1311 * tap interface, connect to the bridge etc. Any socket will do! */
@@ -1502,6 +1346,8 @@ static void setup_tun_net(char *arg)
1502 add_feature(dev, VIRTIO_NET_F_HOST_TSO4); 1346 add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
1503 add_feature(dev, VIRTIO_NET_F_HOST_TSO6); 1347 add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
1504 add_feature(dev, VIRTIO_NET_F_HOST_ECN); 1348 add_feature(dev, VIRTIO_NET_F_HOST_ECN);
1349 /* We handle indirect ring entries */
1350 add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
1505 set_config(dev, sizeof(conf), &conf); 1351 set_config(dev, sizeof(conf), &conf);
1506 1352
1507 /* We don't need the socket any more; setup is done. */ 1353 /* We don't need the socket any more; setup is done. */
@@ -1550,20 +1396,18 @@ struct vblk_info
1550 * Remember that the block device is handled by a separate I/O thread. We head 1396 * Remember that the block device is handled by a separate I/O thread. We head
1551 * straight into the core of that thread here: 1397 * straight into the core of that thread here:
1552 */ 1398 */
1553static bool service_io(struct device *dev) 1399static void blk_request(struct virtqueue *vq)
1554{ 1400{
1555 struct vblk_info *vblk = dev->priv; 1401 struct vblk_info *vblk = vq->dev->priv;
1556 unsigned int head, out_num, in_num, wlen; 1402 unsigned int head, out_num, in_num, wlen;
1557 int ret; 1403 int ret;
1558 u8 *in; 1404 u8 *in;
1559 struct virtio_blk_outhdr *out; 1405 struct virtio_blk_outhdr *out;
1560 struct iovec iov[dev->vq->vring.num]; 1406 struct iovec iov[vq->vring.num];
1561 off64_t off; 1407 off64_t off;
1562 1408
1563 /* See if there's a request waiting. If not, nothing to do. */ 1409 /* Get the next request. */
1564 head = get_vq_desc(dev->vq, iov, &out_num, &in_num); 1410 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
1565 if (head == dev->vq->vring.num)
1566 return false;
1567 1411
1568 /* Every block request should contain at least one output buffer 1412 /* Every block request should contain at least one output buffer
1569 * (detailing the location on disk and the type of request) and one 1413 * (detailing the location on disk and the type of request) and one
@@ -1637,83 +1481,21 @@ static bool service_io(struct device *dev)
1637 if (out->type & VIRTIO_BLK_T_BARRIER) 1481 if (out->type & VIRTIO_BLK_T_BARRIER)
1638 fdatasync(vblk->fd); 1482 fdatasync(vblk->fd);
1639 1483
1640 /* We can't trigger an IRQ, because we're not the Launcher. It does 1484 add_used(vq, head, wlen);
1641 * that when we tell it we're done. */
1642 add_used(dev->vq, head, wlen);
1643 return true;
1644}
1645
1646/* This is the thread which actually services the I/O. */
1647static int io_thread(void *_dev)
1648{
1649 struct device *dev = _dev;
1650 struct vblk_info *vblk = dev->priv;
1651 char c;
1652
1653 /* Close other side of workpipe so we get 0 read when main dies. */
1654 close(vblk->workpipe[1]);
1655 /* Close the other side of the done_fd pipe. */
1656 close(dev->fd);
1657
1658 /* When this read fails, it means Launcher died, so we follow. */
1659 while (read(vblk->workpipe[0], &c, 1) == 1) {
1660 /* We acknowledge each request immediately to reduce latency,
1661 * rather than waiting until we've done them all. I haven't
1662 * measured to see if it makes any difference.
1663 *
1664 * That would be an interesting test, wouldn't it? You could
1665 * also try having more than one I/O thread. */
1666 while (service_io(dev))
1667 write(vblk->done_fd, &c, 1);
1668 }
1669 return 0;
1670}
1671
1672/* Now we've seen the I/O thread, we return to the Launcher to see what happens
1673 * when that thread tells us it's completed some I/O. */
1674static bool handle_io_finish(int fd, struct device *dev)
1675{
1676 char c;
1677
1678 /* If the I/O thread died, presumably it printed the error, so we
1679 * simply exit. */
1680 if (read(dev->fd, &c, 1) != 1)
1681 exit(1);
1682
1683 /* It did some work, so trigger the irq. */
1684 trigger_irq(fd, dev->vq);
1685 return true;
1686}
1687
1688/* When the Guest submits some I/O, we just need to wake the I/O thread. */
1689static void handle_virtblk_output(int fd, struct virtqueue *vq, bool timeout)
1690{
1691 struct vblk_info *vblk = vq->dev->priv;
1692 char c = 0;
1693
1694 /* Wake up I/O thread and tell it to go to work! */
1695 if (write(vblk->workpipe[1], &c, 1) != 1)
1696 /* Presumably it indicated why it died. */
1697 exit(1);
1698} 1485}
1699 1486
1700/*L:198 This actually sets up a virtual block device. */ 1487/*L:198 This actually sets up a virtual block device. */
1701static void setup_block_file(const char *filename) 1488static void setup_block_file(const char *filename)
1702{ 1489{
1703 int p[2];
1704 struct device *dev; 1490 struct device *dev;
1705 struct vblk_info *vblk; 1491 struct vblk_info *vblk;
1706 void *stack;
1707 struct virtio_blk_config conf; 1492 struct virtio_blk_config conf;
1708 1493
1709 /* This is the pipe the I/O thread will use to tell us I/O is done. */
1710 pipe(p);
1711
1712 /* The device responds to return from I/O thread. */ 1494 /* The device responds to return from I/O thread. */
1713 dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); 1495 dev = new_device("block", VIRTIO_ID_BLOCK);
1714 1496
1715 /* The device has one virtqueue, where the Guest places requests. */ 1497 /* The device has one virtqueue, where the Guest places requests. */
1716 add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); 1498 add_virtqueue(dev, VIRTQUEUE_NUM, blk_request);
1717 1499
1718 /* Allocate the room for our own bookkeeping */ 1500 /* Allocate the room for our own bookkeeping */
1719 vblk = dev->priv = malloc(sizeof(*vblk)); 1501 vblk = dev->priv = malloc(sizeof(*vblk));
@@ -1735,49 +1517,29 @@ static void setup_block_file(const char *filename)
1735 1517
1736 set_config(dev, sizeof(conf), &conf); 1518 set_config(dev, sizeof(conf), &conf);
1737 1519
1738 /* The I/O thread writes to this end of the pipe when done. */
1739 vblk->done_fd = p[1];
1740
1741 /* This is the second pipe, which is how we tell the I/O thread about
1742 * more work. */
1743 pipe(vblk->workpipe);
1744
1745 /* Create stack for thread and run it. Since stack grows upwards, we
1746 * point the stack pointer to the end of this region. */
1747 stack = malloc(32768);
1748 /* SIGCHLD - We dont "wait" for our cloned thread, so prevent it from
1749 * becoming a zombie. */
1750 if (clone(io_thread, stack + 32768, CLONE_VM | SIGCHLD, dev) == -1)
1751 err(1, "Creating clone");
1752
1753 /* We don't need to keep the I/O thread's end of the pipes open. */
1754 close(vblk->done_fd);
1755 close(vblk->workpipe[0]);
1756
1757 verbose("device %u: virtblock %llu sectors\n", 1520 verbose("device %u: virtblock %llu sectors\n",
1758 devices.device_num, le64_to_cpu(conf.capacity)); 1521 ++devices.device_num, le64_to_cpu(conf.capacity));
1759} 1522}
1760 1523
1524struct rng_info {
1525 int rfd;
1526};
1527
1761/* Our random number generator device reads from /dev/random into the Guest's 1528/* Our random number generator device reads from /dev/random into the Guest's
1762 * input buffers. The usual case is that the Guest doesn't want random numbers 1529 * input buffers. The usual case is that the Guest doesn't want random numbers
1763 * and so has no buffers although /dev/random is still readable, whereas 1530 * and so has no buffers although /dev/random is still readable, whereas
1764 * console is the reverse. 1531 * console is the reverse.
1765 * 1532 *
1766 * The same logic applies, however. */ 1533 * The same logic applies, however. */
1767static bool handle_rng_input(int fd, struct device *dev) 1534static void rng_input(struct virtqueue *vq)
1768{ 1535{
1769 int len; 1536 int len;
1770 unsigned int head, in_num, out_num, totlen = 0; 1537 unsigned int head, in_num, out_num, totlen = 0;
1771 struct iovec iov[dev->vq->vring.num]; 1538 struct rng_info *rng_info = vq->dev->priv;
1539 struct iovec iov[vq->vring.num];
1772 1540
1773 /* First we need a buffer from the Guests's virtqueue. */ 1541 /* First we need a buffer from the Guests's virtqueue. */
1774 head = get_vq_desc(dev->vq, iov, &out_num, &in_num); 1542 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
1775
1776 /* If they're not ready for input, stop listening to this file
1777 * descriptor. We'll start again once they add an input buffer. */
1778 if (head == dev->vq->vring.num)
1779 return false;
1780
1781 if (out_num) 1543 if (out_num)
1782 errx(1, "Output buffers in rng?"); 1544 errx(1, "Output buffers in rng?");
1783 1545
@@ -1785,7 +1547,7 @@ static bool handle_rng_input(int fd, struct device *dev)
1785 * it reads straight into the Guest's buffer. We loop to make sure we 1547 * it reads straight into the Guest's buffer. We loop to make sure we
1786 * fill it. */ 1548 * fill it. */
1787 while (!iov_empty(iov, in_num)) { 1549 while (!iov_empty(iov, in_num)) {
1788 len = readv(dev->fd, iov, in_num); 1550 len = readv(rng_info->rfd, iov, in_num);
1789 if (len <= 0) 1551 if (len <= 0)
1790 err(1, "Read from /dev/random gave %i", len); 1552 err(1, "Read from /dev/random gave %i", len);
1791 iov_consume(iov, in_num, len); 1553 iov_consume(iov, in_num, len);
@@ -1793,25 +1555,23 @@ static bool handle_rng_input(int fd, struct device *dev)
1793 } 1555 }
1794 1556
1795 /* Tell the Guest about the new input. */ 1557 /* Tell the Guest about the new input. */
1796 add_used_and_trigger(fd, dev->vq, head, totlen); 1558 add_used(vq, head, totlen);
1797
1798 /* Everything went OK! */
1799 return true;
1800} 1559}
1801 1560
1802/* And this creates a "hardware" random number device for the Guest. */ 1561/* And this creates a "hardware" random number device for the Guest. */
1803static void setup_rng(void) 1562static void setup_rng(void)
1804{ 1563{
1805 struct device *dev; 1564 struct device *dev;
1806 int fd; 1565 struct rng_info *rng_info = malloc(sizeof(*rng_info));
1807 1566
1808 fd = open_or_die("/dev/random", O_RDONLY); 1567 rng_info->rfd = open_or_die("/dev/random", O_RDONLY);
1809 1568
1810 /* The device responds to return from I/O thread. */ 1569 /* The device responds to return from I/O thread. */
1811 dev = new_device("rng", VIRTIO_ID_RNG, fd, handle_rng_input); 1570 dev = new_device("rng", VIRTIO_ID_RNG);
1571 dev->priv = rng_info;
1812 1572
1813 /* The device has one virtqueue, where the Guest places inbufs. */ 1573 /* The device has one virtqueue, where the Guest places inbufs. */
1814 add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); 1574 add_virtqueue(dev, VIRTQUEUE_NUM, rng_input);
1815 1575
1816 verbose("device %u: rng\n", devices.device_num++); 1576 verbose("device %u: rng\n", devices.device_num++);
1817} 1577}
@@ -1827,17 +1587,18 @@ static void __attribute__((noreturn)) restart_guest(void)
1827 for (i = 3; i < FD_SETSIZE; i++) 1587 for (i = 3; i < FD_SETSIZE; i++)
1828 close(i); 1588 close(i);
1829 1589
1830 /* The exec automatically gets rid of the I/O and Waker threads. */ 1590 /* Reset all the devices (kills all threads). */
1591 cleanup_devices();
1592
1831 execv(main_args[0], main_args); 1593 execv(main_args[0], main_args);
1832 err(1, "Could not exec %s", main_args[0]); 1594 err(1, "Could not exec %s", main_args[0]);
1833} 1595}
1834 1596
1835/*L:220 Finally we reach the core of the Launcher which runs the Guest, serves 1597/*L:220 Finally we reach the core of the Launcher which runs the Guest, serves
1836 * its input and output, and finally, lays it to rest. */ 1598 * its input and output, and finally, lays it to rest. */
1837static void __attribute__((noreturn)) run_guest(int lguest_fd) 1599static void __attribute__((noreturn)) run_guest(void)
1838{ 1600{
1839 for (;;) { 1601 for (;;) {
1840 unsigned long args[] = { LHREQ_BREAK, 0 };
1841 unsigned long notify_addr; 1602 unsigned long notify_addr;
1842 int readval; 1603 int readval;
1843 1604
@@ -1848,8 +1609,7 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd)
1848 /* One unsigned long means the Guest did HCALL_NOTIFY */ 1609 /* One unsigned long means the Guest did HCALL_NOTIFY */
1849 if (readval == sizeof(notify_addr)) { 1610 if (readval == sizeof(notify_addr)) {
1850 verbose("Notify on address %#lx\n", notify_addr); 1611 verbose("Notify on address %#lx\n", notify_addr);
1851 handle_output(lguest_fd, notify_addr); 1612 handle_output(notify_addr);
1852 continue;
1853 /* ENOENT means the Guest died. Reading tells us why. */ 1613 /* ENOENT means the Guest died. Reading tells us why. */
1854 } else if (errno == ENOENT) { 1614 } else if (errno == ENOENT) {
1855 char reason[1024] = { 0 }; 1615 char reason[1024] = { 0 };
@@ -1858,19 +1618,9 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd)
1858 /* ERESTART means that we need to reboot the guest */ 1618 /* ERESTART means that we need to reboot the guest */
1859 } else if (errno == ERESTART) { 1619 } else if (errno == ERESTART) {
1860 restart_guest(); 1620 restart_guest();
1861 /* EAGAIN means a signal (timeout). 1621 /* Anything else means a bug or incompatible change. */
1862 * Anything else means a bug or incompatible change. */ 1622 } else
1863 } else if (errno != EAGAIN)
1864 err(1, "Running guest failed"); 1623 err(1, "Running guest failed");
1865
1866 /* Only service input on thread for CPU 0. */
1867 if (cpu_id != 0)
1868 continue;
1869
1870 /* Service input, then unset the BREAK to release the Waker. */
1871 handle_input(lguest_fd);
1872 if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
1873 err(1, "Resetting break");
1874 } 1624 }
1875} 1625}
1876/*L:240 1626/*L:240
@@ -1904,8 +1654,8 @@ int main(int argc, char *argv[])
1904 /* Memory, top-level pagetable, code startpoint and size of the 1654 /* Memory, top-level pagetable, code startpoint and size of the
1905 * (optional) initrd. */ 1655 * (optional) initrd. */
1906 unsigned long mem = 0, start, initrd_size = 0; 1656 unsigned long mem = 0, start, initrd_size = 0;
1907 /* Two temporaries and the /dev/lguest file descriptor. */ 1657 /* Two temporaries. */
1908 int i, c, lguest_fd; 1658 int i, c;
1909 /* The boot information for the Guest. */ 1659 /* The boot information for the Guest. */
1910 struct boot_params *boot; 1660 struct boot_params *boot;
1911 /* If they specify an initrd file to load. */ 1661 /* If they specify an initrd file to load. */
@@ -1913,18 +1663,10 @@ int main(int argc, char *argv[])
1913 1663
1914 /* Save the args: we "reboot" by execing ourselves again. */ 1664 /* Save the args: we "reboot" by execing ourselves again. */
1915 main_args = argv; 1665 main_args = argv;
1916 /* We don't "wait" for the children, so prevent them from becoming
1917 * zombies. */
1918 signal(SIGCHLD, SIG_IGN);
1919 1666
1920 /* First we initialize the device list. Since console and network 1667 /* First we initialize the device list. We keep a pointer to the last
1921 * device receive input from a file descriptor, we keep an fdset 1668 * device, and the next interrupt number to use for devices (1:
1922 * (infds) and the maximum fd number (max_infd) with the head of the 1669 * remember that 0 is used by the timer). */
1923 * list. We also keep a pointer to the last device. Finally, we keep
1924 * the next interrupt number to use for devices (1: remember that 0 is
1925 * used by the timer). */
1926 FD_ZERO(&devices.infds);
1927 devices.max_infd = -1;
1928 devices.lastdev = NULL; 1670 devices.lastdev = NULL;
1929 devices.next_irq = 1; 1671 devices.next_irq = 1;
1930 1672
@@ -1982,9 +1724,6 @@ int main(int argc, char *argv[])
1982 /* We always have a console device */ 1724 /* We always have a console device */
1983 setup_console(); 1725 setup_console();
1984 1726
1985 /* We can timeout waiting for Guest network transmit. */
1986 setup_timeout();
1987
1988 /* Now we load the kernel */ 1727 /* Now we load the kernel */
1989 start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); 1728 start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));
1990 1729
@@ -2023,15 +1762,16 @@ int main(int argc, char *argv[])
2023 1762
2024 /* We tell the kernel to initialize the Guest: this returns the open 1763 /* We tell the kernel to initialize the Guest: this returns the open
2025 * /dev/lguest file descriptor. */ 1764 * /dev/lguest file descriptor. */
2026 lguest_fd = tell_kernel(start); 1765 tell_kernel(start);
1766
1767 /* Ensure that we terminate if a child dies. */
1768 signal(SIGCHLD, kill_launcher);
2027 1769
2028 /* We clone off a thread, which wakes the Launcher whenever one of the 1770 /* If we exit via err(), this kills all the threads, restores tty. */
2029 * input file descriptors needs attention. We call this the Waker, and 1771 atexit(cleanup_devices);
2030 * we'll cover it in a moment. */
2031 setup_waker(lguest_fd);
2032 1772
2033 /* Finally, run the Guest. This doesn't return. */ 1773 /* Finally, run the Guest. This doesn't return. */
2034 run_guest(lguest_fd); 1774 run_guest();
2035} 1775}
2036/*:*/ 1776/*:*/
2037 1777
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index 28c747362f95..efb3a6a045a2 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -37,7 +37,6 @@ Running Lguest:
37 "Paravirtualized guest support" = Y 37 "Paravirtualized guest support" = Y
38 "Lguest guest support" = Y 38 "Lguest guest support" = Y
39 "High Memory Support" = off/4GB 39 "High Memory Support" = off/4GB
40 "PAE (Physical Address Extension) Support" = N
41 "Alignment value to which kernel should be aligned" = 0x100000 40 "Alignment value to which kernel should be aligned" = 0x100000
42 (CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and 41 (CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and
43 CONFIG_PHYSICAL_ALIGN=0x100000) 42 CONFIG_PHYSICAL_ALIGN=0x100000)
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 421e7d00ffd0..c9abbd86bc18 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -75,9 +75,6 @@ may need to apply in domain-specific ways to their devices:
75struct bus_type { 75struct bus_type {
76 ... 76 ...
77 int (*suspend)(struct device *dev, pm_message_t state); 77 int (*suspend)(struct device *dev, pm_message_t state);
78 int (*suspend_late)(struct device *dev, pm_message_t state);
79
80 int (*resume_early)(struct device *dev);
81 int (*resume)(struct device *dev); 78 int (*resume)(struct device *dev);
82}; 79};
83 80
@@ -226,20 +223,7 @@ The phases are seen by driver notifications issued in this order:
226 223
227 This call should handle parts of device suspend logic that require 224 This call should handle parts of device suspend logic that require
228 sleeping. It probably does work to quiesce the device which hasn't 225 sleeping. It probably does work to quiesce the device which hasn't
229 been abstracted into class.suspend() or bus.suspend_late(). 226 been abstracted into class.suspend().
230
231 3 bus.suspend_late(dev, message) is called with IRQs disabled, and
232 with only one CPU active. Until the bus.resume_early() phase
233 completes (see later), IRQs are not enabled again. This method
234 won't be exposed by all busses; for message based busses like USB,
235 I2C, or SPI, device interactions normally require IRQs. This bus
236 call may be morphed into a driver call with bus-specific parameters.
237
238 This call might save low level hardware state that might otherwise
239 be lost in the upcoming low power state, and actually put the
240 device into a low power state ... so that in some cases the device
241 may stay partly usable until this late. This "late" call may also
242 help when coping with hardware that behaves badly.
243 227
244The pm_message_t parameter is currently used to refine those semantics 228The pm_message_t parameter is currently used to refine those semantics
245(described later). 229(described later).
@@ -351,19 +335,11 @@ devices processing each phase's calls before the next phase begins.
351 335
352The phases are seen by driver notifications issued in this order: 336The phases are seen by driver notifications issued in this order:
353 337
354 1 bus.resume_early(dev) is called with IRQs disabled, and with 338 1 bus.resume(dev) reverses the effects of bus.suspend(). This may
355 only one CPU active. As with bus.suspend_late(), this method 339 be morphed into a device driver call with bus-specific parameters;
356 won't be supported on busses that require IRQs in order to 340 implementations may sleep.
357 interact with devices.
358
359 This reverses the effects of bus.suspend_late().
360
361 2 bus.resume(dev) is called next. This may be morphed into a device
362 driver call with bus-specific parameters; implementations may sleep.
363
364 This reverses the effects of bus.suspend().
365 341
366 3 class.resume(dev) is called for devices associated with a class 342 2 class.resume(dev) is called for devices associated with a class
367 that has such a method. Implementations may sleep. 343 that has such a method. Implementations may sleep.
368 344
369 This reverses the effects of class.suspend(), and would usually 345 This reverses the effects of class.suspend(), and would usually
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 012858d2b119..5c08d96f407c 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -460,6 +460,25 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
460 460
461 The power-management is supported. 461 The power-management is supported.
462 462
463 Module snd-ctxfi
464 ----------------
465
466 Module for Creative Sound Blaster X-Fi boards (20k1 / 20k2 chips)
467 * Creative Sound Blaster X-Fi Titanium Fatal1ty Champion Series
468 * Creative Sound Blaster X-Fi Titanium Fatal1ty Professional Series
469 * Creative Sound Blaster X-Fi Titanium Professional Audio
470 * Creative Sound Blaster X-Fi Titanium
471 * Creative Sound Blaster X-Fi Elite Pro
472 * Creative Sound Blaster X-Fi Platinum
473 * Creative Sound Blaster X-Fi Fatal1ty
474 * Creative Sound Blaster X-Fi XtremeGamer
475 * Creative Sound Blaster X-Fi XtremeMusic
476
477 reference_rate - reference sample rate, 44100 or 48000 (default)
478 multiple - multiple to ref. sample rate, 1 or 2 (default)
479
480 This module supports multiple cards.
481
463 Module snd-darla20 482 Module snd-darla20
464 ------------------ 483 ------------------
465 484
@@ -925,6 +944,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
925 * Onkyo SE-90PCI 944 * Onkyo SE-90PCI
926 * Onkyo SE-200PCI 945 * Onkyo SE-200PCI
927 * ESI Juli@ 946 * ESI Juli@
947 * ESI Maya44
928 * Hercules Fortissimo IV 948 * Hercules Fortissimo IV
929 * EGO-SYS WaveTerminal 192M 949 * EGO-SYS WaveTerminal 192M
930 950
@@ -933,7 +953,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
933 prodigy71xt, prodigy71hifi, prodigyhd2, prodigy192, 953 prodigy71xt, prodigy71hifi, prodigyhd2, prodigy192,
934 juli, aureon51, aureon71, universe, ap192, k8x800, 954 juli, aureon51, aureon71, universe, ap192, k8x800,
935 phase22, phase28, ms300, av710, se200pci, se90pci, 955 phase22, phase28, ms300, av710, se200pci, se90pci,
936 fortissimo4, sn25p, WT192M 956 fortissimo4, sn25p, WT192M, maya44
937 957
938 This module supports multiple cards and autoprobe. 958 This module supports multiple cards and autoprobe.
939 959
@@ -1093,6 +1113,13 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
1093 This module supports multiple cards. 1113 This module supports multiple cards.
1094 The driver requires the firmware loader support on kernel. 1114 The driver requires the firmware loader support on kernel.
1095 1115
1116 Module snd-lx6464es
1117 -------------------
1118
1119 Module for Digigram LX6464ES boards
1120
1121 This module supports multiple cards.
1122
1096 Module snd-maestro3 1123 Module snd-maestro3
1097 ------------------- 1124 -------------------
1098 1125
@@ -1543,13 +1570,15 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
1543 Module snd-sc6000 1570 Module snd-sc6000
1544 ----------------- 1571 -----------------
1545 1572
1546 Module for Gallant SC-6000 soundcard. 1573 Module for Gallant SC-6000 soundcard and later models: SC-6600
1574 and SC-7000.
1547 1575
1548 port - Port # (0x220 or 0x240) 1576 port - Port # (0x220 or 0x240)
1549 mss_port - MSS Port # (0x530 or 0xe80) 1577 mss_port - MSS Port # (0x530 or 0xe80)
1550 irq - IRQ # (5,7,9,10,11) 1578 irq - IRQ # (5,7,9,10,11)
1551 mpu_irq - MPU-401 IRQ # (5,7,9,10) ,0 - no MPU-401 irq 1579 mpu_irq - MPU-401 IRQ # (5,7,9,10) ,0 - no MPU-401 irq
1552 dma - DMA # (1,3,0) 1580 dma - DMA # (1,3,0)
1581 joystick - Enable gameport - 0 = disable (default), 1 = enable
1553 1582
1554 This module supports multiple cards. 1583 This module supports multiple cards.
1555 1584
@@ -1859,7 +1888,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
1859 ------------------- 1888 -------------------
1860 1889
1861 Module for sound cards based on the Asus AV100/AV200 chips, 1890 Module for sound cards based on the Asus AV100/AV200 chips,
1862 i.e., Xonar D1, DX, D2, D2X, HDAV1.3 (Deluxe), and Essence STX. 1891 i.e., Xonar D1, DX, D2, D2X, HDAV1.3 (Deluxe), Essence ST
1892 (Deluxe) and Essence STX.
1863 1893
1864 This module supports autoprobe and multiple cards. 1894 This module supports autoprobe and multiple cards.
1865 1895
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 322869fc8a9e..de8e10a94103 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -36,6 +36,7 @@ ALC260
36 acer Acer TravelMate 36 acer Acer TravelMate
37 will Will laptops (PB V7900) 37 will Will laptops (PB V7900)
38 replacer Replacer 672V 38 replacer Replacer 672V
39 favorit100 Maxdata Favorit 100XS
39 basic fixed pin assignment (old default model) 40 basic fixed pin assignment (old default model)
40 test for testing/debugging purpose, almost all controls can 41 test for testing/debugging purpose, almost all controls can
41 adjusted. Appearing only when compiled with 42 adjusted. Appearing only when compiled with
@@ -85,10 +86,11 @@ ALC269
85 eeepc-p703 ASUS Eeepc P703 P900A 86 eeepc-p703 ASUS Eeepc P703 P900A
86 eeepc-p901 ASUS Eeepc P901 S101 87 eeepc-p901 ASUS Eeepc P901 S101
87 fujitsu FSC Amilo 88 fujitsu FSC Amilo
89 lifebook Fujitsu Lifebook S6420
88 auto auto-config reading BIOS (default) 90 auto auto-config reading BIOS (default)
89 91
90ALC662/663 92ALC662/663/272
91========== 93==============
92 3stack-dig 3-stack (2-channel) with SPDIF 94 3stack-dig 3-stack (2-channel) with SPDIF
93 3stack-6ch 3-stack (6-channel) 95 3stack-6ch 3-stack (6-channel)
94 3stack-6ch-dig 3-stack (6-channel) with SPDIF 96 3stack-6ch-dig 3-stack (6-channel) with SPDIF
@@ -107,6 +109,9 @@ ALC662/663
107 asus-mode4 ASUS 109 asus-mode4 ASUS
108 asus-mode5 ASUS 110 asus-mode5 ASUS
109 asus-mode6 ASUS 111 asus-mode6 ASUS
112 dell Dell with ALC272
113 dell-zm1 Dell ZM1 with ALC272
114 samsung-nc10 Samsung NC10 mini notebook
110 auto auto-config reading BIOS (default) 115 auto auto-config reading BIOS (default)
111 116
112ALC882/885 117ALC882/885
@@ -118,6 +123,7 @@ ALC882/885
118 asus-a7j ASUS A7J 123 asus-a7j ASUS A7J
119 asus-a7m ASUS A7M 124 asus-a7m ASUS A7M
120 macpro MacPro support 125 macpro MacPro support
126 mb5 Macbook 5,1
121 mbp3 Macbook Pro rev3 127 mbp3 Macbook Pro rev3
122 imac24 iMac 24'' with jack detection 128 imac24 iMac 24'' with jack detection
123 w2jc ASUS W2JC 129 w2jc ASUS W2JC
@@ -133,10 +139,12 @@ ALC883/888
133 acer Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc) 139 acer Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
134 acer-aspire Acer Aspire 9810 140 acer-aspire Acer Aspire 9810
135 acer-aspire-4930g Acer Aspire 4930G 141 acer-aspire-4930g Acer Aspire 4930G
142 acer-aspire-8930g Acer Aspire 8930G
136 medion Medion Laptops 143 medion Medion Laptops
137 medion-md2 Medion MD2 144 medion-md2 Medion MD2
138 targa-dig Targa/MSI 145 targa-dig Targa/MSI
139 targa-2ch-dig Targs/MSI with 2-channel 146 targa-2ch-dig Targa/MSI with 2-channel
147 targa-8ch-dig Targa/MSI with 8-channel (MSI GX620)
140 laptop-eapd 3-jack with SPDIF I/O and EAPD (Clevo M540JE, M550JE) 148 laptop-eapd 3-jack with SPDIF I/O and EAPD (Clevo M540JE, M550JE)
141 lenovo-101e Lenovo 101E 149 lenovo-101e Lenovo 101E
142 lenovo-nb0763 Lenovo NB0763 150 lenovo-nb0763 Lenovo NB0763
@@ -150,6 +158,9 @@ ALC883/888
150 fujitsu-pi2515 Fujitsu AMILO Pi2515 158 fujitsu-pi2515 Fujitsu AMILO Pi2515
151 fujitsu-xa3530 Fujitsu AMILO XA3530 159 fujitsu-xa3530 Fujitsu AMILO XA3530
152 3stack-6ch-intel Intel DG33* boards 160 3stack-6ch-intel Intel DG33* boards
161 asus-p5q ASUS P5Q-EM boards
162 mb31 MacBook 3,1
163 sony-vaio-tt Sony VAIO TT
153 auto auto-config reading BIOS (default) 164 auto auto-config reading BIOS (default)
154 165
155ALC861/660 166ALC861/660
@@ -348,6 +359,7 @@ STAC92HD71B*
348 hp-m4 HP mini 1000 359 hp-m4 HP mini 1000
349 hp-dv5 HP dv series 360 hp-dv5 HP dv series
350 hp-hdx HP HDX series 361 hp-hdx HP HDX series
362 hp-dv4-1222nr HP dv4-1222nr (with LED support)
351 auto BIOS setup (default) 363 auto BIOS setup (default)
352 364
353STAC92HD73* 365STAC92HD73*
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt
index cfac20cf9e33..381908d8ca42 100644
--- a/Documentation/sound/alsa/Procfile.txt
+++ b/Documentation/sound/alsa/Procfile.txt
@@ -88,26 +88,34 @@ card*/pcm*/info
88 substreams, etc. 88 substreams, etc.
89 89
90card*/pcm*/xrun_debug 90card*/pcm*/xrun_debug
91 This file appears when CONFIG_SND_DEBUG=y. 91 This file appears when CONFIG_SND_DEBUG=y and
92 This shows the status of xrun (= buffer overrun/xrun) debug of 92 CONFIG_PCM_XRUN_DEBUG=y.
93 ALSA PCM middle layer, as an integer from 0 to 2. The value 93 This shows the status of xrun (= buffer overrun/xrun) and
94 can be changed by writing to this file, such as 94 invalid PCM position debug/check of ALSA PCM middle layer.
95 95 It takes an integer value, can be changed by writing to this
96 # cat 2 > /proc/asound/card0/pcm0p/xrun_debug 96 file, such as
97 97
98 When this value is greater than 0, the driver will show the 98 # cat 5 > /proc/asound/card0/pcm0p/xrun_debug
99 messages to kernel log when an xrun is detected. The debug 99
100 message is shown also when the invalid H/W pointer is detected 100 The value consists of the following bit flags:
101 at the update of periods (usually called from the interrupt 101 bit 0 = Enable XRUN/jiffies debug messages
102 bit 1 = Show stack trace at XRUN / jiffies check
103 bit 2 = Enable additional jiffies check
104
105 When the bit 0 is set, the driver will show the messages to
106 kernel log when an xrun is detected. The debug message is
107 shown also when the invalid H/W pointer is detected at the
108 update of periods (usually called from the interrupt
102 handler). 109 handler).
103 110
104 When this value is greater than 1, the driver will show the 111 When the bit 1 is set, the driver will show the stack trace
105 stack trace additionally. This may help the debugging. 112 additionally. This may help the debugging.
106 113
107 Since 2.6.30, this option also enables the hwptr check using 114 Since 2.6.30, this option can enable the hwptr check using
108 jiffies. This detects spontaneous invalid pointer callback 115 jiffies. This detects spontaneous invalid pointer callback
109 values, but can be lead to too much corrections for a (mostly 116 values, but can be lead to too much corrections for a (mostly
110 buggy) hardware that doesn't give smooth pointer updates. 117 buggy) hardware that doesn't give smooth pointer updates.
118 This feature is enabled via the bit 2.
111 119
112card*/pcm*/sub*/info 120card*/pcm*/sub*/info
113 The general information of this PCM sub-stream. 121 The general information of this PCM sub-stream.
diff --git a/Documentation/sound/alsa/README.maya44 b/Documentation/sound/alsa/README.maya44
new file mode 100644
index 000000000000..0e41576fa13e
--- /dev/null
+++ b/Documentation/sound/alsa/README.maya44
@@ -0,0 +1,163 @@
1NOTE: The following is the original document of Rainer's patch that the
2current maya44 code based on. Some contents might be obsoleted, but I
3keep here as reference -- tiwai
4
5----------------------------------------------------------------
6
7STATE OF DEVELOPMENT:
8
9This driver is being developed on the initiative of Piotr Makowski (oponek@gmail.com) and financed by Lars Bergmann.
10Development is carried out by Rainer Zimmermann (mail@lightshed.de).
11
12ESI provided a sample Maya44 card for the development work.
13
14However, unfortunately it has turned out difficult to get detailed programming information, so I (Rainer Zimmermann) had to find out some card-specific information by experiment and conjecture. Some information (in particular, several GPIO bits) is still missing.
15
16This is the first testing version of the Maya44 driver released to the alsa-devel mailing list (Feb 5, 2008).
17
18
19The following functions work, as tested by Rainer Zimmermann and Piotr Makowski:
20
21- playback and capture at all sampling rates
22- input/output level
23- crossmixing
24- line/mic switch
25- phantom power switch
26- analogue monitor a.k.a bypass
27
28
29The following functions *should* work, but are not fully tested:
30
31- Channel 3+4 analogue - S/PDIF input switching
32- S/PDIF output
33- all inputs/outputs on the M/IO/DIO extension card
34- internal/external clock selection
35
36
37*In particular, we would appreciate testing of these functions by anyone who has access to an M/IO/DIO extension card.*
38
39
40Things that do not seem to work:
41
42- The level meters ("multi track") in 'alsamixer' do not seem to react to signals in (if this is a bug, it would probably be in the existing ICE1724 code).
43
44- Ardour 2.1 seems to work only via JACK, not using ALSA directly or via OSS. This still needs to be tracked down.
45
46
47DRIVER DETAILS:
48
49the following files were added:
50
51pci/ice1724/maya44.c - Maya44 specific code
52pci/ice1724/maya44.h
53pci/ice1724/ice1724.patch
54pci/ice1724/ice1724.h.patch - PROPOSED patch to ice1724.h (see SAMPLING RATES)
55i2c/other/wm8776.c - low-level access routines for Wolfson WM8776 codecs
56include/wm8776.h
57
58
59Note that the wm8776.c code is meant to be card-independent and does not actually register the codec with the ALSA infrastructure.
60This is done in maya44.c, mainly because some of the WM8776 controls are used in Maya44-specific ways, and should be named appropriately.
61
62
63the following files were created in pci/ice1724, simply #including the corresponding file from the alsa-kernel tree:
64
65wtm.h
66vt1720_mobo.h
67revo.h
68prodigy192.h
69pontis.h
70phase.h
71maya44.h
72juli.h
73aureon.h
74amp.h
75envy24ht.h
76se.h
77prodigy_hifi.h
78
79
80*I hope this is the correct way to do things.*
81
82
83SAMPLING RATES:
84
85The Maya44 card (or more exactly, the Wolfson WM8776 codecs) allow a maximum sampling rate of 192 kHz for playback and 92 kHz for capture.
86
87As the ICE1724 chip only allows one global sampling rate, this is handled as follows:
88
89* setting the sampling rate on any open PCM device on the maya44 card will always set the *global* sampling rate for all playback and capture channels.
90
91* In the current state of the driver, setting rates of up to 192 kHz is permitted even for capture devices.
92
93*AVOID CAPTURING AT RATES ABOVE 96kHz*, even though it may appear to work. The codec cannot actually capture at such rates, meaning poor quality.
94
95
96I propose some additional code for limiting the sampling rate when setting on a capture pcm device. However because of the global sampling rate, this logic would be somewhat problematic.
97
98The proposed code (currently deactivated) is in ice1712.h.patch, ice1724.c and maya44.c (in pci/ice1712).
99
100
101SOUND DEVICES:
102
103PCM devices correspond to inputs/outputs as follows (assuming Maya44 is card #0):
104
105hw:0,0 input - stereo, analog input 1+2
106hw:0,0 output - stereo, analog output 1+2
107hw:0,1 input - stereo, analog input 3+4 OR S/PDIF input
108hw:0,1 output - stereo, analog output 3+4 (and SPDIF out)
109
110
111NAMING OF MIXER CONTROLS:
112
113(for more information about the signal flow, please refer to the block diagram on p.24 of the ESI Maya44 manual, or in the ESI windows software).
114
115
116PCM: (digital) output level for channel 1+2
117PCM 1: same for channel 3+4
118
119Mic Phantom+48V: switch for +48V phantom power for electrostatic microphones on input 1/2.
120 Make sure this is not turned on while any other source is connected to input 1/2.
121 It might damage the source and/or the maya44 card.
122
123Mic/Line input: if switch is is on, input jack 1/2 is microphone input (mono), otherwise line input (stereo).
124
125Bypass: analogue bypass from ADC input to output for channel 1+2. Same as "Monitor" in the windows driver.
126Bypass 1: same for channel 3+4.
127
128Crossmix: cross-mixer from channels 1+2 to channels 3+4
129Crossmix 1: cross-mixer from channels 3+4 to channels 1+2
130
131IEC958 Output: switch for S/PDIF output.
132 This is not supported by the ESI windows driver.
133 S/PDIF should output the same signal as channel 3+4. [untested!]
134
135
136Digitial output selectors:
137
138 These switches allow a direct digital routing from the ADCs to the DACs.
139 Each switch determines where the digital input data to one of the DACs comes from.
140 They are not supported by the ESI windows driver.
141 For normal operation, they should all be set to "PCM out".
142
143H/W: Output source channel 1
144H/W 1: Output source channel 2
145H/W 2: Output source channel 3
146H/W 3: Output source channel 4
147
148H/W 4 ... H/W 9: unknown function, left in to enable testing.
149 Possibly some of these control S/PDIF output(s).
150 If these turn out to be unused, they will go away in later driver versions.
151
152Selectable values for each of the digital output selectors are:
153 "PCM out" -> DAC output of the corresponding channel (default setting)
154 "Input 1"...
155 "Input 4" -> direct routing from ADC output of the selected input channel
156
157
158--------
159
160Feb 14, 2008
161Rainer Zimmermann
162mail@lightshed.de
163
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index 9e6763264a2e..9ac842be9b4f 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -62,6 +62,7 @@ Audio DAPM widgets fall into a number of types:-
62 o Mic - Mic (and optional Jack) 62 o Mic - Mic (and optional Jack)
63 o Line - Line Input/Output (and optional Jack) 63 o Line - Line Input/Output (and optional Jack)
64 o Speaker - Speaker 64 o Speaker - Speaker
65 o Supply - Power or clock supply widget used by other widgets.
65 o Pre - Special PRE widget (exec before all others) 66 o Pre - Special PRE widget (exec before all others)
66 o Post - Special POST widget (exec after all others) 67 o Post - Special POST widget (exec after all others)
67 68
diff --git a/MAINTAINERS b/MAINTAINERS
index c944d618dc83..90f81283b722 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3363,6 +3363,16 @@ F: drivers/serial/kgdboc.c
3363F: include/linux/kgdb.h 3363F: include/linux/kgdb.h
3364F: kernel/kgdb.c 3364F: kernel/kgdb.c
3365 3365
3366KMEMLEAK
3367P: Catalin Marinas
3368M: catalin.marinas@arm.com
3369L: linux-kernel@vger.kernel.org
3370S: Maintained
3371F: Documentation/kmemleak.txt
3372F: include/linux/kmemleak.h
3373F: mm/kmemleak.c
3374F: mm/kmemleak-test.c
3375
3366KMEMTRACE 3376KMEMTRACE
3367P: Eduard - Gabriel Munteanu 3377P: Eduard - Gabriel Munteanu
3368M: eduard.munteanu@linux360.ro 3378M: eduard.munteanu@linux360.ro
@@ -3372,12 +3382,6 @@ F: Documentation/trace/kmemtrace.txt
3372F: include/trace/kmemtrace.h 3382F: include/trace/kmemtrace.h
3373F: kernel/trace/kmemtrace.c 3383F: kernel/trace/kmemtrace.c
3374 3384
3375KMEMLEAK
3376P: Catalin Marinas
3377M: catalin.marinas@arm.com
3378L: linux-kernel@vger.kernel.org
3379S: Maintained
3380
3381KPROBES 3385KPROBES
3382P: Ananth N Mavinakayanahalli 3386P: Ananth N Mavinakayanahalli
3383M: ananth@in.ibm.com 3387M: ananth@in.ibm.com
@@ -4603,7 +4607,8 @@ F: drivers/pcmcia/pxa2xx*
4603F: drivers/spi/pxa2xx* 4607F: drivers/spi/pxa2xx*
4604F: drivers/usb/gadget/pxa2* 4608F: drivers/usb/gadget/pxa2*
4605F: include/sound/pxa2xx-lib.h 4609F: include/sound/pxa2xx-lib.h
4606F: sound/soc/pxa/pxa2xx* 4610F: sound/arm/pxa*
4611F: sound/soc/pxa
4607 4612
4608PXA168 SUPPORT 4613PXA168 SUPPORT
4609P: Eric Miao 4614P: Eric Miao
@@ -5331,11 +5336,12 @@ P: Liam Girdwood
5331M: lrg@slimlogic.co.uk 5336M: lrg@slimlogic.co.uk
5332P: Mark Brown 5337P: Mark Brown
5333M: broonie@opensource.wolfsonmicro.com 5338M: broonie@opensource.wolfsonmicro.com
5334T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc 5339T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git
5335L: alsa-devel@alsa-project.org (subscribers-only) 5340L: alsa-devel@alsa-project.org (subscribers-only)
5336W: http://alsa-project.org/main/index.php/ASoC 5341W: http://alsa-project.org/main/index.php/ASoC
5337S: Supported 5342S: Supported
5338F: sound/soc/ 5343F: sound/soc/
5344F: include/sound/soc*
5339 5345
5340SPARC + UltraSPARC (sparc/sparc64) 5346SPARC + UltraSPARC (sparc/sparc64)
5341P: David S. Miller 5347P: David S. Miller
diff --git a/arch/alpha/include/asm/suspend.h b/arch/alpha/include/asm/suspend.h
deleted file mode 100644
index c7042d575851..000000000000
--- a/arch/alpha/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ALPHA_SUSPEND_H
2#define __ALPHA_SUSPEND_H
3
4/* Dummy include. */
5
6#endif /* __ALPHA_SUSPEND_H */
diff --git a/arch/alpha/mm/extable.c b/arch/alpha/mm/extable.c
index 62dc379d301a..813c9b63c0e1 100644
--- a/arch/alpha/mm/extable.c
+++ b/arch/alpha/mm/extable.c
@@ -48,6 +48,27 @@ void sort_extable(struct exception_table_entry *start,
48 cmp_ex, swap_ex); 48 cmp_ex, swap_ex);
49} 49}
50 50
51#ifdef CONFIG_MODULES
52/*
53 * Any entry referring to the module init will be at the beginning or
54 * the end.
55 */
56void trim_init_extable(struct module *m)
57{
58 /*trim the beginning*/
59 while (m->num_exentries &&
60 within_module_init(ex_to_addr(&m->extable[0]), m)) {
61 m->extable++;
62 m->num_exentries--;
63 }
64 /*trim the end*/
65 while (m->num_exentries &&
66 within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
67 m))
68 m->num_exentries--;
69}
70#endif /* CONFIG_MODULES */
71
51const struct exception_table_entry * 72const struct exception_table_entry *
52search_extable(const struct exception_table_entry *first, 73search_extable(const struct exception_table_entry *first,
53 const struct exception_table_entry *last, 74 const struct exception_table_entry *last,
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
deleted file mode 100644
index cf0d0bdee74d..000000000000
--- a/arch/arm/include/asm/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef _ASMARM_SUSPEND_H
2#define _ASMARM_SUSPEND_H
3
4#endif
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 1167fe9cf6c4..98f94d041d9c 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -32,8 +32,6 @@ void module_free(struct module *mod, void *module_region)
32 mod->arch.syminfo = NULL; 32 mod->arch.syminfo = NULL;
33 33
34 vfree(module_region); 34 vfree(module_region);
35 /* FIXME: if module_region == mod->init_region, trim exception
36 * table entries. */
37} 35}
38 36
39static inline int check_rela(Elf32_Rela *rela, struct module *module, 37static inline int check_rela(Elf32_Rela *rela, struct module *module,
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3640cdc38aac..c56fd3eb7c10 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -223,6 +223,7 @@ endchoice
223 223
224config SMP 224config SMP
225 depends on BF561 225 depends on BF561
226 select GENERIC_TIME
226 bool "Symmetric multi-processing support" 227 bool "Symmetric multi-processing support"
227 ---help--- 228 ---help---
228 This enables support for systems with more than one CPU, 229 This enables support for systems with more than one CPU,
@@ -241,12 +242,6 @@ config IRQ_PER_CPU
241 depends on SMP 242 depends on SMP
242 default y 243 default y
243 244
244config TICK_SOURCE_SYSTMR0
245 bool
246 select BFIN_GPTIMERS
247 depends on SMP
248 default y
249
250config BF_REV_MIN 245config BF_REV_MIN
251 int 246 int
252 default 0 if (BF51x || BF52x || (BF54x && !BF54xM)) 247 default 0 if (BF51x || BF52x || (BF54x && !BF54xM))
@@ -263,8 +258,8 @@ config BF_REV_MAX
263 258
264choice 259choice
265 prompt "Silicon Rev" 260 prompt "Silicon Rev"
266 default BF_REV_0_1 if (BF51x || BF52x || (BF54x && !BF54xM)) 261 default BF_REV_0_0 if (BF51x || BF52x)
267 default BF_REV_0_2 if (BF534 || BF536 || BF537) 262 default BF_REV_0_2 if (BF534 || BF536 || BF537 || (BF54x && !BF54xM))
268 default BF_REV_0_3 if (BF531 || BF532 || BF533 || BF54xM || BF561) 263 default BF_REV_0_3 if (BF531 || BF532 || BF533 || BF54xM || BF561)
269 264
270config BF_REV_0_0 265config BF_REV_0_0
@@ -607,7 +602,6 @@ source kernel/Kconfig.hz
607 602
608config GENERIC_TIME 603config GENERIC_TIME
609 bool "Generic time" 604 bool "Generic time"
610 depends on !SMP
611 default y 605 default y
612 606
613config GENERIC_CLOCKEVENTS 607config GENERIC_CLOCKEVENTS
@@ -615,12 +609,26 @@ config GENERIC_CLOCKEVENTS
615 depends on GENERIC_TIME 609 depends on GENERIC_TIME
616 default y 610 default y
617 611
612choice
613 prompt "Kernel Tick Source"
614 depends on GENERIC_CLOCKEVENTS
615 default TICKSOURCE_CORETMR
616
617config TICKSOURCE_GPTMR0
618 bool "Gptimer0 (SCLK domain)"
619 select BFIN_GPTIMERS
620 depends on !IPIPE
621
622config TICKSOURCE_CORETMR
623 bool "Core timer (CCLK domain)"
624
625endchoice
626
618config CYCLES_CLOCKSOURCE 627config CYCLES_CLOCKSOURCE
619 bool "Use 'CYCLES' as a clocksource (EXPERIMENTAL)" 628 bool "Use 'CYCLES' as a clocksource"
620 depends on EXPERIMENTAL
621 depends on GENERIC_CLOCKEVENTS 629 depends on GENERIC_CLOCKEVENTS
622 depends on !BFIN_SCRATCH_REG_CYCLES 630 depends on !BFIN_SCRATCH_REG_CYCLES
623 default n 631 depends on !SMP
624 help 632 help
625 If you say Y here, you will enable support for using the 'cycles' 633 If you say Y here, you will enable support for using the 'cycles'
626 registers as a clock source. Doing so means you will be unable to 634 registers as a clock source. Doing so means you will be unable to
@@ -628,6 +636,11 @@ config CYCLES_CLOCKSOURCE
628 still be able to read it (such as for performance monitoring), but 636 still be able to read it (such as for performance monitoring), but
629 writing the registers will most likely crash the kernel. 637 writing the registers will most likely crash the kernel.
630 638
639config GPTMR0_CLOCKSOURCE
640 bool "Use GPTimer0 as a clocksource (higher rating)"
641 depends on GENERIC_CLOCKEVENTS
642 depends on !TICKSOURCE_GPTMR0
643
631source kernel/time/Kconfig 644source kernel/time/Kconfig
632 645
633comment "Misc" 646comment "Misc"
@@ -808,7 +821,7 @@ config APP_STACK_L1
808config EXCEPTION_L1_SCRATCH 821config EXCEPTION_L1_SCRATCH
809 bool "Locate exception stack in L1 Scratch Memory" 822 bool "Locate exception stack in L1 Scratch Memory"
810 default n 823 default n
811 depends on !APP_STACK_L1 && !SYSCALL_TAB_L1 824 depends on !APP_STACK_L1
812 help 825 help
813 Whenever an exception occurs, use the L1 Scratch memory for 826 Whenever an exception occurs, use the L1 Scratch memory for
814 stack storage. You cannot place the stacks of FLAT binaries 827 stack storage. You cannot place the stacks of FLAT binaries
@@ -901,7 +914,7 @@ config BFIN_ICACHE_LOCK
901 bool "Enable Instruction Cache Locking" 914 bool "Enable Instruction Cache Locking"
902 915
903choice 916choice
904 prompt "Policy" 917 prompt "External memory cache policy"
905 depends on BFIN_DCACHE 918 depends on BFIN_DCACHE
906 default BFIN_WB if !SMP 919 default BFIN_WB if !SMP
907 default BFIN_WT if SMP 920 default BFIN_WT if SMP
@@ -942,12 +955,22 @@ config BFIN_WT
942 955
943endchoice 956endchoice
944 957
945config BFIN_L2_CACHEABLE 958choice
946 bool "Cache L2 SRAM" 959 prompt "L2 SRAM cache policy"
947 depends on (BFIN_DCACHE || BFIN_ICACHE) && (BF54x || (BF561 && !SMP)) 960 depends on (BF54x || BF561)
948 default n 961 default BFIN_L2_WT
949 help 962config BFIN_L2_WB
950 Select to make L2 SRAM cacheable in L1 data and instruction cache. 963 bool "Write back"
964 depends on !SMP
965
966config BFIN_L2_WT
967 bool "Write through"
968 depends on !SMP
969
970config BFIN_L2_NOT_CACHED
971 bool "Not cached"
972
973endchoice
951 974
952config MPU 975config MPU
953 bool "Enable the memory protection unit (EXPERIMENTAL)" 976 bool "Enable the memory protection unit (EXPERIMENTAL)"
@@ -1011,21 +1034,34 @@ endmenu
1011 1034
1012menu "EBIU_AMBCTL Control" 1035menu "EBIU_AMBCTL Control"
1013config BANK_0 1036config BANK_0
1014 hex "Bank 0" 1037 hex "Bank 0 (AMBCTL0.L)"
1015 default 0x7BB0 1038 default 0x7BB0
1039 help
1040 These are the low 16 bits of the EBIU_AMBCTL0 MMR which are
1041 used to control the Asynchronous Memory Bank 0 settings.
1016 1042
1017config BANK_1 1043config BANK_1
1018 hex "Bank 1" 1044 hex "Bank 1 (AMBCTL0.H)"
1019 default 0x7BB0 1045 default 0x7BB0
1020 default 0x5558 if BF54x 1046 default 0x5558 if BF54x
1047 help
1048 These are the high 16 bits of the EBIU_AMBCTL0 MMR which are
1049 used to control the Asynchronous Memory Bank 1 settings.
1021 1050
1022config BANK_2 1051config BANK_2
1023 hex "Bank 2" 1052 hex "Bank 2 (AMBCTL1.L)"
1024 default 0x7BB0 1053 default 0x7BB0
1054 help
1055 These are the low 16 bits of the EBIU_AMBCTL1 MMR which are
1056 used to control the Asynchronous Memory Bank 2 settings.
1025 1057
1026config BANK_3 1058config BANK_3
1027 hex "Bank 3" 1059 hex "Bank 3 (AMBCTL1.H)"
1028 default 0x99B3 1060 default 0x99B3
1061 help
1062 These are the high 16 bits of the EBIU_AMBCTL1 MMR which are
1063 used to control the Asynchronous Memory Bank 3 settings.
1064
1029endmenu 1065endmenu
1030 1066
1031config EBIU_MBSCTLVAL 1067config EBIU_MBSCTLVAL
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 79e7e63ab709..1fc4981d486f 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -54,6 +54,19 @@ config DEBUG_HWERR
54 hardware error interrupts and need to know where they are coming 54 hardware error interrupts and need to know where they are coming
55 from. 55 from.
56 56
57config EXACT_HWERR
58 bool "Try to make Hardware errors exact"
59 depends on DEBUG_HWERR
60 help
61 By default, the Blackfin hardware errors are not exact - the error
62 be reported multiple cycles after the error happens. This delay
63 can cause the wrong application, or even the kernel to receive a
64 signal to be killed. If you are getting HW errors in your system,
65 try turning this on to ensure they are at least comming from the
66 proper thread.
67
68 On production systems, it is safe (and a small optimization) to say N.
69
57config DEBUG_DOUBLEFAULT 70config DEBUG_DOUBLEFAULT
58 bool "Debug Double Faults" 71 bool "Debug Double Faults"
59 default n 72 default n
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index c121d6e6e2b8..baec1337f282 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -259,7 +259,10 @@ CONFIG_HZ=250
259# CONFIG_SCHED_HRTICK is not set 259# CONFIG_SCHED_HRTICK is not set
260CONFIG_GENERIC_TIME=y 260CONFIG_GENERIC_TIME=y
261CONFIG_GENERIC_CLOCKEVENTS=y 261CONFIG_GENERIC_CLOCKEVENTS=y
262# CONFIG_TICKSOURCE_GPTMR0 is not set
263CONFIG_TICKSOURCE_CORETMR=y
262# CONFIG_CYCLES_CLOCKSOURCE is not set 264# CONFIG_CYCLES_CLOCKSOURCE is not set
265# CONFIG_GPTMR0_CLOCKSOURCE is not set
263# CONFIG_NO_HZ is not set 266# CONFIG_NO_HZ is not set
264# CONFIG_HIGH_RES_TIMERS is not set 267# CONFIG_HIGH_RES_TIMERS is not set
265CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 268CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -404,7 +407,7 @@ CONFIG_IP_PNP=y
404# CONFIG_NET_IPIP is not set 407# CONFIG_NET_IPIP is not set
405# CONFIG_NET_IPGRE is not set 408# CONFIG_NET_IPGRE is not set
406# CONFIG_ARPD is not set 409# CONFIG_ARPD is not set
407CONFIG_SYN_COOKIES=y 410# CONFIG_SYN_COOKIES is not set
408# CONFIG_INET_AH is not set 411# CONFIG_INET_AH is not set
409# CONFIG_INET_ESP is not set 412# CONFIG_INET_ESP is not set
410# CONFIG_INET_IPCOMP is not set 413# CONFIG_INET_IPCOMP is not set
@@ -688,14 +691,14 @@ CONFIG_INPUT_MISC=y
688# Character devices 691# Character devices
689# 692#
690# CONFIG_AD9960 is not set 693# CONFIG_AD9960 is not set
691# CONFIG_SPI_ADC_BF533 is not set 694CONFIG_BFIN_DMA_INTERFACE=m
692# CONFIG_BF5xx_PPIFCD is not set 695# CONFIG_BFIN_PPI is not set
696# CONFIG_BFIN_PPIFCD is not set
693# CONFIG_BFIN_SIMPLE_TIMER is not set 697# CONFIG_BFIN_SIMPLE_TIMER is not set
694# CONFIG_BF5xx_PPI is not set 698# CONFIG_BFIN_SPI_ADC is not set
695# CONFIG_BFIN_SPORT is not set 699# CONFIG_BFIN_SPORT is not set
696# CONFIG_BFIN_TIMER_LATENCY is not set 700# CONFIG_BFIN_TIMER_LATENCY is not set
697# CONFIG_TWI_LCD is not set 701# CONFIG_BFIN_TWI_LCD is not set
698CONFIG_BFIN_DMA_INTERFACE=m
699CONFIG_SIMPLE_GPIO=m 702CONFIG_SIMPLE_GPIO=m
700CONFIG_VT=y 703CONFIG_VT=y
701CONFIG_CONSOLE_TRANSLATIONS=y 704CONFIG_CONSOLE_TRANSLATIONS=y
@@ -802,7 +805,30 @@ CONFIG_SPI_BFIN=y
802# CONFIG_SPI_SPIDEV is not set 805# CONFIG_SPI_SPIDEV is not set
803# CONFIG_SPI_TLE62X0 is not set 806# CONFIG_SPI_TLE62X0 is not set
804CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 807CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
805# CONFIG_GPIOLIB is not set 808CONFIG_GPIOLIB=y
809# CONFIG_DEBUG_GPIO is not set
810CONFIG_GPIO_SYSFS=y
811
812#
813# Memory mapped GPIO expanders:
814#
815
816#
817# I2C GPIO expanders:
818#
819# CONFIG_GPIO_MAX732X is not set
820# CONFIG_GPIO_PCA953X is not set
821# CONFIG_GPIO_PCF857X is not set
822
823#
824# PCI GPIO expanders:
825#
826
827#
828# SPI GPIO expanders:
829#
830# CONFIG_GPIO_MAX7301 is not set
831# CONFIG_GPIO_MCP23S08 is not set
806# CONFIG_W1 is not set 832# CONFIG_W1 is not set
807# CONFIG_POWER_SUPPLY is not set 833# CONFIG_POWER_SUPPLY is not set
808# CONFIG_HWMON is not set 834# CONFIG_HWMON is not set
@@ -831,6 +857,7 @@ CONFIG_SSB_POSSIBLE=y
831# CONFIG_HTC_PASIC3 is not set 857# CONFIG_HTC_PASIC3 is not set
832# CONFIG_MFD_TMIO is not set 858# CONFIG_MFD_TMIO is not set
833# CONFIG_PMIC_DA903X is not set 859# CONFIG_PMIC_DA903X is not set
860# CONFIG_PMIC_ADP5520 is not set
834# CONFIG_MFD_WM8400 is not set 861# CONFIG_MFD_WM8400 is not set
835# CONFIG_MFD_WM8350_I2C is not set 862# CONFIG_MFD_WM8350_I2C is not set
836# CONFIG_REGULATOR is not set 863# CONFIG_REGULATOR is not set
@@ -962,7 +989,8 @@ CONFIG_RTC_DRV_BFIN=y
962# 989#
963# File systems 990# File systems
964# 991#
965# CONFIG_EXT2_FS is not set 992CONFIG_EXT2_FS=m
993# CONFIG_EXT2_FS_XATTR is not set
966# CONFIG_EXT3_FS is not set 994# CONFIG_EXT3_FS is not set
967# CONFIG_EXT4_FS is not set 995# CONFIG_EXT4_FS is not set
968# CONFIG_REISERFS_FS is not set 996# CONFIG_REISERFS_FS is not set
@@ -988,8 +1016,11 @@ CONFIG_INOTIFY_USER=y
988# 1016#
989# DOS/FAT/NT Filesystems 1017# DOS/FAT/NT Filesystems
990# 1018#
1019CONFIG_FAT_FS=m
991# CONFIG_MSDOS_FS is not set 1020# CONFIG_MSDOS_FS is not set
992# CONFIG_VFAT_FS is not set 1021CONFIG_VFAT_FS=m
1022CONFIG_FAT_DEFAULT_CODEPAGE=437
1023CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
993# CONFIG_NTFS_FS is not set 1024# CONFIG_NTFS_FS is not set
994 1025
995# 1026#
@@ -1012,8 +1043,8 @@ CONFIG_SYSFS=y
1012# CONFIG_BEFS_FS is not set 1043# CONFIG_BEFS_FS is not set
1013# CONFIG_BFS_FS is not set 1044# CONFIG_BFS_FS is not set
1014# CONFIG_EFS_FS is not set 1045# CONFIG_EFS_FS is not set
1015# CONFIG_YAFFS_FS is not set
1016# CONFIG_JFFS2_FS is not set 1046# CONFIG_JFFS2_FS is not set
1047# CONFIG_YAFFS_FS is not set
1017# CONFIG_CRAMFS is not set 1048# CONFIG_CRAMFS is not set
1018# CONFIG_VXFS_FS is not set 1049# CONFIG_VXFS_FS is not set
1019# CONFIG_MINIX_FS is not set 1050# CONFIG_MINIX_FS is not set
@@ -1048,9 +1079,9 @@ CONFIG_SMB_FS=m
1048# 1079#
1049# CONFIG_PARTITION_ADVANCED is not set 1080# CONFIG_PARTITION_ADVANCED is not set
1050CONFIG_MSDOS_PARTITION=y 1081CONFIG_MSDOS_PARTITION=y
1051CONFIG_NLS=y 1082CONFIG_NLS=m
1052CONFIG_NLS_DEFAULT="iso8859-1" 1083CONFIG_NLS_DEFAULT="iso8859-1"
1053CONFIG_NLS_CODEPAGE_437=y 1084CONFIG_NLS_CODEPAGE_437=m
1054# CONFIG_NLS_CODEPAGE_737 is not set 1085# CONFIG_NLS_CODEPAGE_737 is not set
1055# CONFIG_NLS_CODEPAGE_775 is not set 1086# CONFIG_NLS_CODEPAGE_775 is not set
1056# CONFIG_NLS_CODEPAGE_850 is not set 1087# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1065,7 +1096,7 @@ CONFIG_NLS_CODEPAGE_437=y
1065# CONFIG_NLS_CODEPAGE_865 is not set 1096# CONFIG_NLS_CODEPAGE_865 is not set
1066# CONFIG_NLS_CODEPAGE_866 is not set 1097# CONFIG_NLS_CODEPAGE_866 is not set
1067# CONFIG_NLS_CODEPAGE_869 is not set 1098# CONFIG_NLS_CODEPAGE_869 is not set
1068# CONFIG_NLS_CODEPAGE_936 is not set 1099CONFIG_NLS_CODEPAGE_936=m
1069# CONFIG_NLS_CODEPAGE_950 is not set 1100# CONFIG_NLS_CODEPAGE_950 is not set
1070# CONFIG_NLS_CODEPAGE_932 is not set 1101# CONFIG_NLS_CODEPAGE_932 is not set
1071# CONFIG_NLS_CODEPAGE_949 is not set 1102# CONFIG_NLS_CODEPAGE_949 is not set
@@ -1074,7 +1105,7 @@ CONFIG_NLS_CODEPAGE_437=y
1074# CONFIG_NLS_CODEPAGE_1250 is not set 1105# CONFIG_NLS_CODEPAGE_1250 is not set
1075# CONFIG_NLS_CODEPAGE_1251 is not set 1106# CONFIG_NLS_CODEPAGE_1251 is not set
1076# CONFIG_NLS_ASCII is not set 1107# CONFIG_NLS_ASCII is not set
1077CONFIG_NLS_ISO8859_1=y 1108CONFIG_NLS_ISO8859_1=m
1078# CONFIG_NLS_ISO8859_2 is not set 1109# CONFIG_NLS_ISO8859_2 is not set
1079# CONFIG_NLS_ISO8859_3 is not set 1110# CONFIG_NLS_ISO8859_3 is not set
1080# CONFIG_NLS_ISO8859_4 is not set 1111# CONFIG_NLS_ISO8859_4 is not set
@@ -1087,7 +1118,7 @@ CONFIG_NLS_ISO8859_1=y
1087# CONFIG_NLS_ISO8859_15 is not set 1118# CONFIG_NLS_ISO8859_15 is not set
1088# CONFIG_NLS_KOI8_R is not set 1119# CONFIG_NLS_KOI8_R is not set
1089# CONFIG_NLS_KOI8_U is not set 1120# CONFIG_NLS_KOI8_U is not set
1090# CONFIG_NLS_UTF8 is not set 1121CONFIG_NLS_UTF8=m
1091# CONFIG_DLM is not set 1122# CONFIG_DLM is not set
1092 1123
1093# 1124#
@@ -1102,7 +1133,7 @@ CONFIG_FRAME_WARN=1024
1102CONFIG_DEBUG_FS=y 1133CONFIG_DEBUG_FS=y
1103# CONFIG_HEADERS_CHECK is not set 1134# CONFIG_HEADERS_CHECK is not set
1104CONFIG_DEBUG_KERNEL=y 1135CONFIG_DEBUG_KERNEL=y
1105# CONFIG_DEBUG_SHIRQ is not set 1136CONFIG_DEBUG_SHIRQ=y
1106CONFIG_DETECT_SOFTLOCKUP=y 1137CONFIG_DETECT_SOFTLOCKUP=y
1107# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1138# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1108CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1139CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1111,8 +1142,6 @@ CONFIG_SCHED_DEBUG=y
1111# CONFIG_TIMER_STATS is not set 1142# CONFIG_TIMER_STATS is not set
1112# CONFIG_DEBUG_OBJECTS is not set 1143# CONFIG_DEBUG_OBJECTS is not set
1113# CONFIG_DEBUG_SLAB is not set 1144# CONFIG_DEBUG_SLAB is not set
1114# CONFIG_DEBUG_RT_MUTEXES is not set
1115# CONFIG_RT_MUTEX_TESTER is not set
1116# CONFIG_DEBUG_SPINLOCK is not set 1145# CONFIG_DEBUG_SPINLOCK is not set
1117# CONFIG_DEBUG_MUTEXES is not set 1146# CONFIG_DEBUG_MUTEXES is not set
1118# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1147# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1132,7 +1161,6 @@ CONFIG_DEBUG_INFO=y
1132# CONFIG_BACKTRACE_SELF_TEST is not set 1161# CONFIG_BACKTRACE_SELF_TEST is not set
1133# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1162# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1134# CONFIG_FAULT_INJECTION is not set 1163# CONFIG_FAULT_INJECTION is not set
1135CONFIG_SYSCTL_SYSCALL_CHECK=y
1136 1164
1137# 1165#
1138# Tracers 1166# Tracers
@@ -1148,16 +1176,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1148# CONFIG_DEBUG_STACK_USAGE is not set 1176# CONFIG_DEBUG_STACK_USAGE is not set
1149CONFIG_DEBUG_VERBOSE=y 1177CONFIG_DEBUG_VERBOSE=y
1150CONFIG_DEBUG_MMRS=y 1178CONFIG_DEBUG_MMRS=y
1151# CONFIG_DEBUG_HWERR is not set 1179CONFIG_DEBUG_HWERR=y
1152# CONFIG_DEBUG_DOUBLEFAULT is not set 1180CONFIG_EXACT_HWERR=y
1181CONFIG_DEBUG_DOUBLEFAULT=y
1182CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1183# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1184# CONFIG_DEBUG_ICACHE_CHECK is not set
1153CONFIG_DEBUG_HUNT_FOR_ZERO=y 1185CONFIG_DEBUG_HUNT_FOR_ZERO=y
1154CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1186CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1155CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1187# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1156# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1188CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1157# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1189# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1158CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1190CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1159# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1191# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1160# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1192CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1161CONFIG_EARLY_PRINTK=y 1193CONFIG_EARLY_PRINTK=y
1162CONFIG_CPLB_INFO=y 1194CONFIG_CPLB_INFO=y
1163CONFIG_ACCESS_CHECK=y 1195CONFIG_ACCESS_CHECK=y
@@ -1269,7 +1301,6 @@ CONFIG_CRC32=y
1269# CONFIG_CRC7 is not set 1301# CONFIG_CRC7 is not set
1270# CONFIG_LIBCRC32C is not set 1302# CONFIG_LIBCRC32C is not set
1271CONFIG_ZLIB_INFLATE=y 1303CONFIG_ZLIB_INFLATE=y
1272CONFIG_PLIST=y
1273CONFIG_HAS_IOMEM=y 1304CONFIG_HAS_IOMEM=y
1274CONFIG_HAS_IOPORT=y 1305CONFIG_HAS_IOPORT=y
1275CONFIG_HAS_DMA=y 1306CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 3e562b2775d4..c06262e41f7c 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -144,8 +144,8 @@ CONFIG_BF526=y
144# CONFIG_BF561 is not set 144# CONFIG_BF561 is not set
145CONFIG_BF_REV_MIN=0 145CONFIG_BF_REV_MIN=0
146CONFIG_BF_REV_MAX=2 146CONFIG_BF_REV_MAX=2
147# CONFIG_BF_REV_0_0 is not set 147CONFIG_BF_REV_0_0=y
148CONFIG_BF_REV_0_1=y 148# CONFIG_BF_REV_0_1 is not set
149# CONFIG_BF_REV_0_2 is not set 149# CONFIG_BF_REV_0_2 is not set
150# CONFIG_BF_REV_0_3 is not set 150# CONFIG_BF_REV_0_3 is not set
151# CONFIG_BF_REV_0_4 is not set 151# CONFIG_BF_REV_0_4 is not set
@@ -264,7 +264,10 @@ CONFIG_HZ=250
264# CONFIG_SCHED_HRTICK is not set 264# CONFIG_SCHED_HRTICK is not set
265CONFIG_GENERIC_TIME=y 265CONFIG_GENERIC_TIME=y
266CONFIG_GENERIC_CLOCKEVENTS=y 266CONFIG_GENERIC_CLOCKEVENTS=y
267# CONFIG_TICKSOURCE_GPTMR0 is not set
268CONFIG_TICKSOURCE_CORETMR=y
267# CONFIG_CYCLES_CLOCKSOURCE is not set 269# CONFIG_CYCLES_CLOCKSOURCE is not set
270# CONFIG_GPTMR0_CLOCKSOURCE is not set
268# CONFIG_NO_HZ is not set 271# CONFIG_NO_HZ is not set
269# CONFIG_HIGH_RES_TIMERS is not set 272# CONFIG_HIGH_RES_TIMERS is not set
270CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 273CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -409,7 +412,7 @@ CONFIG_IP_PNP=y
409# CONFIG_NET_IPIP is not set 412# CONFIG_NET_IPIP is not set
410# CONFIG_NET_IPGRE is not set 413# CONFIG_NET_IPGRE is not set
411# CONFIG_ARPD is not set 414# CONFIG_ARPD is not set
412CONFIG_SYN_COOKIES=y 415# CONFIG_SYN_COOKIES is not set
413# CONFIG_INET_AH is not set 416# CONFIG_INET_AH is not set
414# CONFIG_INET_ESP is not set 417# CONFIG_INET_ESP is not set
415# CONFIG_INET_IPCOMP is not set 418# CONFIG_INET_IPCOMP is not set
@@ -491,7 +494,7 @@ CONFIG_MTD_PARTITIONS=y
491# 494#
492# User Modules And Translation Layers 495# User Modules And Translation Layers
493# 496#
494CONFIG_MTD_CHAR=m 497CONFIG_MTD_CHAR=y
495CONFIG_MTD_BLKDEVS=y 498CONFIG_MTD_BLKDEVS=y
496CONFIG_MTD_BLOCK=y 499CONFIG_MTD_BLOCK=y
497# CONFIG_FTL is not set 500# CONFIG_FTL is not set
@@ -504,9 +507,9 @@ CONFIG_MTD_BLOCK=y
504# 507#
505# RAM/ROM/Flash chip drivers 508# RAM/ROM/Flash chip drivers
506# 509#
507# CONFIG_MTD_CFI is not set 510CONFIG_MTD_CFI=y
508CONFIG_MTD_JEDECPROBE=m 511# CONFIG_MTD_JEDECPROBE is not set
509CONFIG_MTD_GEN_PROBE=m 512CONFIG_MTD_GEN_PROBE=y
510# CONFIG_MTD_CFI_ADV_OPTIONS is not set 513# CONFIG_MTD_CFI_ADV_OPTIONS is not set
511CONFIG_MTD_MAP_BANK_WIDTH_1=y 514CONFIG_MTD_MAP_BANK_WIDTH_1=y
512CONFIG_MTD_MAP_BANK_WIDTH_2=y 515CONFIG_MTD_MAP_BANK_WIDTH_2=y
@@ -518,9 +521,10 @@ CONFIG_MTD_CFI_I1=y
518CONFIG_MTD_CFI_I2=y 521CONFIG_MTD_CFI_I2=y
519# CONFIG_MTD_CFI_I4 is not set 522# CONFIG_MTD_CFI_I4 is not set
520# CONFIG_MTD_CFI_I8 is not set 523# CONFIG_MTD_CFI_I8 is not set
521# CONFIG_MTD_CFI_INTELEXT is not set 524CONFIG_MTD_CFI_INTELEXT=y
522# CONFIG_MTD_CFI_AMDSTD is not set 525# CONFIG_MTD_CFI_AMDSTD is not set
523# CONFIG_MTD_CFI_STAA is not set 526# CONFIG_MTD_CFI_STAA is not set
527CONFIG_MTD_CFI_UTIL=y
524CONFIG_MTD_RAM=y 528CONFIG_MTD_RAM=y
525CONFIG_MTD_ROM=m 529CONFIG_MTD_ROM=m
526# CONFIG_MTD_ABSENT is not set 530# CONFIG_MTD_ABSENT is not set
@@ -529,7 +533,8 @@ CONFIG_MTD_ROM=m
529# Mapping drivers for chip access 533# Mapping drivers for chip access
530# 534#
531CONFIG_MTD_COMPLEX_MAPPINGS=y 535CONFIG_MTD_COMPLEX_MAPPINGS=y
532# CONFIG_MTD_PHYSMAP is not set 536CONFIG_MTD_PHYSMAP=y
537# CONFIG_MTD_PHYSMAP_COMPAT is not set
533# CONFIG_MTD_GPIO_ADDR is not set 538# CONFIG_MTD_GPIO_ADDR is not set
534# CONFIG_MTD_UCLINUX is not set 539# CONFIG_MTD_UCLINUX is not set
535# CONFIG_MTD_PLATRAM is not set 540# CONFIG_MTD_PLATRAM is not set
@@ -597,9 +602,42 @@ CONFIG_HAVE_IDE=y
597# SCSI device support 602# SCSI device support
598# 603#
599# CONFIG_RAID_ATTRS is not set 604# CONFIG_RAID_ATTRS is not set
600# CONFIG_SCSI is not set 605CONFIG_SCSI=y
601# CONFIG_SCSI_DMA is not set 606CONFIG_SCSI_DMA=y
607# CONFIG_SCSI_TGT is not set
602# CONFIG_SCSI_NETLINK is not set 608# CONFIG_SCSI_NETLINK is not set
609# CONFIG_SCSI_PROC_FS is not set
610
611#
612# SCSI support type (disk, tape, CD-ROM)
613#
614CONFIG_BLK_DEV_SD=y
615# CONFIG_CHR_DEV_ST is not set
616# CONFIG_CHR_DEV_OSST is not set
617CONFIG_BLK_DEV_SR=m
618# CONFIG_BLK_DEV_SR_VENDOR is not set
619# CONFIG_CHR_DEV_SG is not set
620# CONFIG_CHR_DEV_SCH is not set
621
622#
623# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
624#
625# CONFIG_SCSI_MULTI_LUN is not set
626# CONFIG_SCSI_CONSTANTS is not set
627# CONFIG_SCSI_LOGGING is not set
628# CONFIG_SCSI_SCAN_ASYNC is not set
629CONFIG_SCSI_WAIT_SCAN=m
630
631#
632# SCSI Transports
633#
634# CONFIG_SCSI_SPI_ATTRS is not set
635# CONFIG_SCSI_FC_ATTRS is not set
636# CONFIG_SCSI_ISCSI_ATTRS is not set
637# CONFIG_SCSI_SAS_LIBSAS is not set
638# CONFIG_SCSI_SRP_ATTRS is not set
639# CONFIG_SCSI_LOWLEVEL is not set
640# CONFIG_SCSI_DH is not set
603# CONFIG_ATA is not set 641# CONFIG_ATA is not set
604# CONFIG_MD is not set 642# CONFIG_MD is not set
605CONFIG_NETDEVICES=y 643CONFIG_NETDEVICES=y
@@ -644,9 +682,8 @@ CONFIG_BFIN_MAC_RMII=y
644# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 682# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
645# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 683# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
646# CONFIG_B44 is not set 684# CONFIG_B44 is not set
647CONFIG_NETDEV_1000=y 685# CONFIG_NETDEV_1000 is not set
648# CONFIG_AX88180 is not set 686# CONFIG_NETDEV_10000 is not set
649CONFIG_NETDEV_10000=y
650 687
651# 688#
652# Wireless LAN 689# Wireless LAN
@@ -715,14 +752,14 @@ CONFIG_INPUT_MISC=y
715# Character devices 752# Character devices
716# 753#
717# CONFIG_AD9960 is not set 754# CONFIG_AD9960 is not set
718# CONFIG_SPI_ADC_BF533 is not set 755CONFIG_BFIN_DMA_INTERFACE=m
719# CONFIG_BF5xx_PPIFCD is not set 756# CONFIG_BFIN_PPI is not set
757# CONFIG_BFIN_PPIFCD is not set
720# CONFIG_BFIN_SIMPLE_TIMER is not set 758# CONFIG_BFIN_SIMPLE_TIMER is not set
721# CONFIG_BF5xx_PPI is not set 759# CONFIG_BFIN_SPI_ADC is not set
722# CONFIG_BFIN_SPORT is not set 760# CONFIG_BFIN_SPORT is not set
723# CONFIG_BFIN_TIMER_LATENCY is not set 761# CONFIG_BFIN_TIMER_LATENCY is not set
724# CONFIG_TWI_LCD is not set 762# CONFIG_BFIN_TWI_LCD is not set
725CONFIG_BFIN_DMA_INTERFACE=m
726CONFIG_SIMPLE_GPIO=m 763CONFIG_SIMPLE_GPIO=m
727CONFIG_VT=y 764CONFIG_VT=y
728CONFIG_CONSOLE_TRANSLATIONS=y 765CONFIG_CONSOLE_TRANSLATIONS=y
@@ -832,11 +869,35 @@ CONFIG_SPI_BFIN=y
832# CONFIG_SPI_SPIDEV is not set 869# CONFIG_SPI_SPIDEV is not set
833# CONFIG_SPI_TLE62X0 is not set 870# CONFIG_SPI_TLE62X0 is not set
834CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 871CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
835# CONFIG_GPIOLIB is not set 872CONFIG_GPIOLIB=y
873# CONFIG_DEBUG_GPIO is not set
874CONFIG_GPIO_SYSFS=y
875
876#
877# Memory mapped GPIO expanders:
878#
879
880#
881# I2C GPIO expanders:
882#
883# CONFIG_GPIO_MAX732X is not set
884# CONFIG_GPIO_PCA953X is not set
885# CONFIG_GPIO_PCF857X is not set
886
887#
888# PCI GPIO expanders:
889#
890
891#
892# SPI GPIO expanders:
893#
894# CONFIG_GPIO_MAX7301 is not set
895# CONFIG_GPIO_MCP23S08 is not set
836# CONFIG_W1 is not set 896# CONFIG_W1 is not set
837# CONFIG_POWER_SUPPLY is not set 897# CONFIG_POWER_SUPPLY is not set
838CONFIG_HWMON=y 898CONFIG_HWMON=y
839# CONFIG_HWMON_VID is not set 899# CONFIG_HWMON_VID is not set
900# CONFIG_SENSORS_AD5252 is not set
840# CONFIG_SENSORS_AD7414 is not set 901# CONFIG_SENSORS_AD7414 is not set
841# CONFIG_SENSORS_AD7418 is not set 902# CONFIG_SENSORS_AD7418 is not set
842# CONFIG_SENSORS_ADCXX is not set 903# CONFIG_SENSORS_ADCXX is not set
@@ -920,6 +981,7 @@ CONFIG_SSB_POSSIBLE=y
920# CONFIG_HTC_PASIC3 is not set 981# CONFIG_HTC_PASIC3 is not set
921# CONFIG_MFD_TMIO is not set 982# CONFIG_MFD_TMIO is not set
922# CONFIG_PMIC_DA903X is not set 983# CONFIG_PMIC_DA903X is not set
984# CONFIG_PMIC_ADP5520 is not set
923# CONFIG_MFD_WM8400 is not set 985# CONFIG_MFD_WM8400 is not set
924# CONFIG_MFD_WM8350_I2C is not set 986# CONFIG_MFD_WM8350_I2C is not set
925# CONFIG_REGULATOR is not set 987# CONFIG_REGULATOR is not set
@@ -1008,8 +1070,8 @@ CONFIG_USB=y
1008# 1070#
1009# Miscellaneous USB options 1071# Miscellaneous USB options
1010# 1072#
1011# CONFIG_USB_DEVICEFS is not set 1073CONFIG_USB_DEVICEFS=y
1012CONFIG_USB_DEVICE_CLASS=y 1074# CONFIG_USB_DEVICE_CLASS is not set
1013# CONFIG_USB_DYNAMIC_MINORS is not set 1075# CONFIG_USB_DYNAMIC_MINORS is not set
1014# CONFIG_USB_OTG is not set 1076# CONFIG_USB_OTG is not set
1015# CONFIG_USB_OTG_WHITELIST is not set 1077# CONFIG_USB_OTG_WHITELIST is not set
@@ -1037,10 +1099,10 @@ CONFIG_USB_MUSB_SOC=y
1037CONFIG_USB_MUSB_HOST=y 1099CONFIG_USB_MUSB_HOST=y
1038# CONFIG_USB_MUSB_PERIPHERAL is not set 1100# CONFIG_USB_MUSB_PERIPHERAL is not set
1039# CONFIG_USB_MUSB_OTG is not set 1101# CONFIG_USB_MUSB_OTG is not set
1040# CONFIG_USB_GADGET_MUSB_HDRC is not set
1041CONFIG_USB_MUSB_HDRC_HCD=y 1102CONFIG_USB_MUSB_HDRC_HCD=y
1042CONFIG_MUSB_PIO_ONLY=y 1103# CONFIG_MUSB_PIO_ONLY is not set
1043CONFIG_MUSB_DMA_POLL=y 1104CONFIG_USB_INVENTRA_DMA=y
1105# CONFIG_USB_TI_CPPI_DMA is not set
1044# CONFIG_USB_MUSB_DEBUG is not set 1106# CONFIG_USB_MUSB_DEBUG is not set
1045 1107
1046# 1108#
@@ -1058,7 +1120,7 @@ CONFIG_MUSB_DMA_POLL=y
1058# 1120#
1059# see USB_STORAGE Help for more information 1121# see USB_STORAGE Help for more information
1060# 1122#
1061CONFIG_USB_STORAGE=m 1123CONFIG_USB_STORAGE=y
1062# CONFIG_USB_STORAGE_DEBUG is not set 1124# CONFIG_USB_STORAGE_DEBUG is not set
1063# CONFIG_USB_STORAGE_DATAFAB is not set 1125# CONFIG_USB_STORAGE_DATAFAB is not set
1064# CONFIG_USB_STORAGE_FREECOM is not set 1126# CONFIG_USB_STORAGE_FREECOM is not set
@@ -1107,33 +1169,10 @@ CONFIG_USB_STORAGE=m
1107# CONFIG_USB_LD is not set 1169# CONFIG_USB_LD is not set
1108# CONFIG_USB_TRANCEVIBRATOR is not set 1170# CONFIG_USB_TRANCEVIBRATOR is not set
1109# CONFIG_USB_IOWARRIOR is not set 1171# CONFIG_USB_IOWARRIOR is not set
1172# CONFIG_USB_TEST is not set
1110# CONFIG_USB_ISIGHTFW is not set 1173# CONFIG_USB_ISIGHTFW is not set
1111# CONFIG_USB_VST is not set 1174# CONFIG_USB_VST is not set
1112# CONFIG_USB_GADGET is not set 1175# CONFIG_USB_GADGET is not set
1113# CONFIG_USB_GADGET_AT91 is not set
1114# CONFIG_USB_GADGET_ATMEL_USBA is not set
1115# CONFIG_USB_GADGET_FSL_USB2 is not set
1116# CONFIG_USB_GADGET_LH7A40X is not set
1117# CONFIG_USB_GADGET_OMAP is not set
1118# CONFIG_USB_GADGET_PXA25X is not set
1119# CONFIG_USB_GADGET_PXA27X is not set
1120# CONFIG_USB_GADGET_S3C2410 is not set
1121# CONFIG_USB_GADGET_M66592 is not set
1122# CONFIG_USB_GADGET_AMD5536UDC is not set
1123# CONFIG_USB_GADGET_FSL_QE is not set
1124# CONFIG_USB_GADGET_NET2272 is not set
1125# CONFIG_USB_GADGET_NET2280 is not set
1126# CONFIG_USB_GADGET_GOKU is not set
1127# CONFIG_USB_GADGET_DUMMY_HCD is not set
1128# CONFIG_USB_ZERO is not set
1129# CONFIG_USB_AUDIO is not set
1130# CONFIG_USB_ETH is not set
1131# CONFIG_USB_GADGETFS is not set
1132# CONFIG_USB_FILE_STORAGE is not set
1133# CONFIG_USB_G_SERIAL is not set
1134# CONFIG_USB_MIDI_GADGET is not set
1135# CONFIG_USB_G_PRINTER is not set
1136# CONFIG_USB_CDC_COMPOSITE is not set
1137# CONFIG_MMC is not set 1176# CONFIG_MMC is not set
1138# CONFIG_MEMSTICK is not set 1177# CONFIG_MEMSTICK is not set
1139# CONFIG_NEW_LEDS is not set 1178# CONFIG_NEW_LEDS is not set
@@ -1206,7 +1245,8 @@ CONFIG_RTC_DRV_BFIN=y
1206# 1245#
1207# File systems 1246# File systems
1208# 1247#
1209# CONFIG_EXT2_FS is not set 1248CONFIG_EXT2_FS=m
1249# CONFIG_EXT2_FS_XATTR is not set
1210# CONFIG_EXT3_FS is not set 1250# CONFIG_EXT3_FS is not set
1211# CONFIG_EXT4_FS is not set 1251# CONFIG_EXT4_FS is not set
1212# CONFIG_REISERFS_FS is not set 1252# CONFIG_REISERFS_FS is not set
@@ -1226,14 +1266,19 @@ CONFIG_INOTIFY_USER=y
1226# 1266#
1227# CD-ROM/DVD Filesystems 1267# CD-ROM/DVD Filesystems
1228# 1268#
1229# CONFIG_ISO9660_FS is not set 1269CONFIG_ISO9660_FS=m
1270CONFIG_JOLIET=y
1271# CONFIG_ZISOFS is not set
1230# CONFIG_UDF_FS is not set 1272# CONFIG_UDF_FS is not set
1231 1273
1232# 1274#
1233# DOS/FAT/NT Filesystems 1275# DOS/FAT/NT Filesystems
1234# 1276#
1277CONFIG_FAT_FS=m
1235# CONFIG_MSDOS_FS is not set 1278# CONFIG_MSDOS_FS is not set
1236# CONFIG_VFAT_FS is not set 1279CONFIG_VFAT_FS=m
1280CONFIG_FAT_DEFAULT_CODEPAGE=437
1281CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1237# CONFIG_NTFS_FS is not set 1282# CONFIG_NTFS_FS is not set
1238 1283
1239# 1284#
@@ -1256,16 +1301,6 @@ CONFIG_SYSFS=y
1256# CONFIG_BEFS_FS is not set 1301# CONFIG_BEFS_FS is not set
1257# CONFIG_BFS_FS is not set 1302# CONFIG_BFS_FS is not set
1258# CONFIG_EFS_FS is not set 1303# CONFIG_EFS_FS is not set
1259CONFIG_YAFFS_FS=m
1260CONFIG_YAFFS_YAFFS1=y
1261# CONFIG_YAFFS_9BYTE_TAGS is not set
1262# CONFIG_YAFFS_DOES_ECC is not set
1263CONFIG_YAFFS_YAFFS2=y
1264CONFIG_YAFFS_AUTO_YAFFS2=y
1265# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1266# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1267# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1268CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1269CONFIG_JFFS2_FS=m 1304CONFIG_JFFS2_FS=m
1270CONFIG_JFFS2_FS_DEBUG=0 1305CONFIG_JFFS2_FS_DEBUG=0
1271CONFIG_JFFS2_FS_WRITEBUFFER=y 1306CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1277,6 +1312,16 @@ CONFIG_JFFS2_ZLIB=y
1277# CONFIG_JFFS2_LZO is not set 1312# CONFIG_JFFS2_LZO is not set
1278CONFIG_JFFS2_RTIME=y 1313CONFIG_JFFS2_RTIME=y
1279# CONFIG_JFFS2_RUBIN is not set 1314# CONFIG_JFFS2_RUBIN is not set
1315CONFIG_YAFFS_FS=m
1316CONFIG_YAFFS_YAFFS1=y
1317# CONFIG_YAFFS_9BYTE_TAGS is not set
1318# CONFIG_YAFFS_DOES_ECC is not set
1319CONFIG_YAFFS_YAFFS2=y
1320CONFIG_YAFFS_AUTO_YAFFS2=y
1321# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1322# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1323# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1324CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1280# CONFIG_CRAMFS is not set 1325# CONFIG_CRAMFS is not set
1281# CONFIG_VXFS_FS is not set 1326# CONFIG_VXFS_FS is not set
1282# CONFIG_MINIX_FS is not set 1327# CONFIG_MINIX_FS is not set
@@ -1313,7 +1358,7 @@ CONFIG_SMB_FS=m
1313CONFIG_MSDOS_PARTITION=y 1358CONFIG_MSDOS_PARTITION=y
1314CONFIG_NLS=m 1359CONFIG_NLS=m
1315CONFIG_NLS_DEFAULT="iso8859-1" 1360CONFIG_NLS_DEFAULT="iso8859-1"
1316# CONFIG_NLS_CODEPAGE_437 is not set 1361CONFIG_NLS_CODEPAGE_437=m
1317# CONFIG_NLS_CODEPAGE_737 is not set 1362# CONFIG_NLS_CODEPAGE_737 is not set
1318# CONFIG_NLS_CODEPAGE_775 is not set 1363# CONFIG_NLS_CODEPAGE_775 is not set
1319# CONFIG_NLS_CODEPAGE_850 is not set 1364# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1328,7 +1373,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1328# CONFIG_NLS_CODEPAGE_865 is not set 1373# CONFIG_NLS_CODEPAGE_865 is not set
1329# CONFIG_NLS_CODEPAGE_866 is not set 1374# CONFIG_NLS_CODEPAGE_866 is not set
1330# CONFIG_NLS_CODEPAGE_869 is not set 1375# CONFIG_NLS_CODEPAGE_869 is not set
1331# CONFIG_NLS_CODEPAGE_936 is not set 1376CONFIG_NLS_CODEPAGE_936=m
1332# CONFIG_NLS_CODEPAGE_950 is not set 1377# CONFIG_NLS_CODEPAGE_950 is not set
1333# CONFIG_NLS_CODEPAGE_932 is not set 1378# CONFIG_NLS_CODEPAGE_932 is not set
1334# CONFIG_NLS_CODEPAGE_949 is not set 1379# CONFIG_NLS_CODEPAGE_949 is not set
@@ -1337,7 +1382,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1337# CONFIG_NLS_CODEPAGE_1250 is not set 1382# CONFIG_NLS_CODEPAGE_1250 is not set
1338# CONFIG_NLS_CODEPAGE_1251 is not set 1383# CONFIG_NLS_CODEPAGE_1251 is not set
1339# CONFIG_NLS_ASCII is not set 1384# CONFIG_NLS_ASCII is not set
1340# CONFIG_NLS_ISO8859_1 is not set 1385CONFIG_NLS_ISO8859_1=m
1341# CONFIG_NLS_ISO8859_2 is not set 1386# CONFIG_NLS_ISO8859_2 is not set
1342# CONFIG_NLS_ISO8859_3 is not set 1387# CONFIG_NLS_ISO8859_3 is not set
1343# CONFIG_NLS_ISO8859_4 is not set 1388# CONFIG_NLS_ISO8859_4 is not set
@@ -1350,7 +1395,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1350# CONFIG_NLS_ISO8859_15 is not set 1395# CONFIG_NLS_ISO8859_15 is not set
1351# CONFIG_NLS_KOI8_R is not set 1396# CONFIG_NLS_KOI8_R is not set
1352# CONFIG_NLS_KOI8_U is not set 1397# CONFIG_NLS_KOI8_U is not set
1353# CONFIG_NLS_UTF8 is not set 1398CONFIG_NLS_UTF8=m
1354# CONFIG_DLM is not set 1399# CONFIG_DLM is not set
1355 1400
1356# 1401#
@@ -1365,7 +1410,7 @@ CONFIG_FRAME_WARN=1024
1365CONFIG_DEBUG_FS=y 1410CONFIG_DEBUG_FS=y
1366# CONFIG_HEADERS_CHECK is not set 1411# CONFIG_HEADERS_CHECK is not set
1367CONFIG_DEBUG_KERNEL=y 1412CONFIG_DEBUG_KERNEL=y
1368# CONFIG_DEBUG_SHIRQ is not set 1413CONFIG_DEBUG_SHIRQ=y
1369CONFIG_DETECT_SOFTLOCKUP=y 1414CONFIG_DETECT_SOFTLOCKUP=y
1370# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1415# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1371CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1416CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1374,8 +1419,6 @@ CONFIG_SCHED_DEBUG=y
1374# CONFIG_TIMER_STATS is not set 1419# CONFIG_TIMER_STATS is not set
1375# CONFIG_DEBUG_OBJECTS is not set 1420# CONFIG_DEBUG_OBJECTS is not set
1376# CONFIG_DEBUG_SLAB is not set 1421# CONFIG_DEBUG_SLAB is not set
1377# CONFIG_DEBUG_RT_MUTEXES is not set
1378# CONFIG_RT_MUTEX_TESTER is not set
1379# CONFIG_DEBUG_SPINLOCK is not set 1422# CONFIG_DEBUG_SPINLOCK is not set
1380# CONFIG_DEBUG_MUTEXES is not set 1423# CONFIG_DEBUG_MUTEXES is not set
1381# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1424# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1395,7 +1438,6 @@ CONFIG_DEBUG_INFO=y
1395# CONFIG_BACKTRACE_SELF_TEST is not set 1438# CONFIG_BACKTRACE_SELF_TEST is not set
1396# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1439# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1397# CONFIG_FAULT_INJECTION is not set 1440# CONFIG_FAULT_INJECTION is not set
1398CONFIG_SYSCTL_SYSCALL_CHECK=y
1399 1441
1400# 1442#
1401# Tracers 1443# Tracers
@@ -1411,16 +1453,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1411# CONFIG_DEBUG_STACK_USAGE is not set 1453# CONFIG_DEBUG_STACK_USAGE is not set
1412CONFIG_DEBUG_VERBOSE=y 1454CONFIG_DEBUG_VERBOSE=y
1413CONFIG_DEBUG_MMRS=y 1455CONFIG_DEBUG_MMRS=y
1414# CONFIG_DEBUG_HWERR is not set 1456CONFIG_DEBUG_HWERR=y
1415# CONFIG_DEBUG_DOUBLEFAULT is not set 1457CONFIG_EXACT_HWERR=y
1458CONFIG_DEBUG_DOUBLEFAULT=y
1459CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1460# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1461# CONFIG_DEBUG_ICACHE_CHECK is not set
1416CONFIG_DEBUG_HUNT_FOR_ZERO=y 1462CONFIG_DEBUG_HUNT_FOR_ZERO=y
1417CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1463CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1418CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1464# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1419# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1465CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1420# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1466# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1421CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1467CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1422# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1468# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1423# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1469CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1424CONFIG_EARLY_PRINTK=y 1470CONFIG_EARLY_PRINTK=y
1425CONFIG_CPLB_INFO=y 1471CONFIG_CPLB_INFO=y
1426CONFIG_ACCESS_CHECK=y 1472CONFIG_ACCESS_CHECK=y
@@ -1534,7 +1580,6 @@ CONFIG_CRC32=y
1534# CONFIG_LIBCRC32C is not set 1580# CONFIG_LIBCRC32C is not set
1535CONFIG_ZLIB_INFLATE=y 1581CONFIG_ZLIB_INFLATE=y
1536CONFIG_ZLIB_DEFLATE=m 1582CONFIG_ZLIB_DEFLATE=m
1537CONFIG_PLIST=y
1538CONFIG_HAS_IOMEM=y 1583CONFIG_HAS_IOMEM=y
1539CONFIG_HAS_IOPORT=y 1584CONFIG_HAS_IOPORT=y
1540CONFIG_HAS_DMA=y 1585CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 911b5dba1dbc..e9175c608aa7 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -145,8 +145,8 @@ CONFIG_BF527=y
145CONFIG_BF_REV_MIN=0 145CONFIG_BF_REV_MIN=0
146CONFIG_BF_REV_MAX=2 146CONFIG_BF_REV_MAX=2
147# CONFIG_BF_REV_0_0 is not set 147# CONFIG_BF_REV_0_0 is not set
148CONFIG_BF_REV_0_1=y 148# CONFIG_BF_REV_0_1 is not set
149# CONFIG_BF_REV_0_2 is not set 149CONFIG_BF_REV_0_2=y
150# CONFIG_BF_REV_0_3 is not set 150# CONFIG_BF_REV_0_3 is not set
151# CONFIG_BF_REV_0_4 is not set 151# CONFIG_BF_REV_0_4 is not set
152# CONFIG_BF_REV_0_5 is not set 152# CONFIG_BF_REV_0_5 is not set
@@ -264,7 +264,10 @@ CONFIG_HZ=250
264# CONFIG_SCHED_HRTICK is not set 264# CONFIG_SCHED_HRTICK is not set
265CONFIG_GENERIC_TIME=y 265CONFIG_GENERIC_TIME=y
266CONFIG_GENERIC_CLOCKEVENTS=y 266CONFIG_GENERIC_CLOCKEVENTS=y
267# CONFIG_TICKSOURCE_GPTMR0 is not set
268CONFIG_TICKSOURCE_CORETMR=y
267# CONFIG_CYCLES_CLOCKSOURCE is not set 269# CONFIG_CYCLES_CLOCKSOURCE is not set
270# CONFIG_GPTMR0_CLOCKSOURCE is not set
268# CONFIG_NO_HZ is not set 271# CONFIG_NO_HZ is not set
269# CONFIG_HIGH_RES_TIMERS is not set 272# CONFIG_HIGH_RES_TIMERS is not set
270CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 273CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -318,7 +321,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
318# CONFIG_PHYS_ADDR_T_64BIT is not set 321# CONFIG_PHYS_ADDR_T_64BIT is not set
319CONFIG_ZONE_DMA_FLAG=1 322CONFIG_ZONE_DMA_FLAG=1
320CONFIG_VIRT_TO_BUS=y 323CONFIG_VIRT_TO_BUS=y
321CONFIG_BFIN_GPTIMERS=m 324CONFIG_BFIN_GPTIMERS=y
322# CONFIG_DMA_UNCACHED_4M is not set 325# CONFIG_DMA_UNCACHED_4M is not set
323# CONFIG_DMA_UNCACHED_2M is not set 326# CONFIG_DMA_UNCACHED_2M is not set
324CONFIG_DMA_UNCACHED_1M=y 327CONFIG_DMA_UNCACHED_1M=y
@@ -409,7 +412,7 @@ CONFIG_IP_PNP=y
409# CONFIG_NET_IPIP is not set 412# CONFIG_NET_IPIP is not set
410# CONFIG_NET_IPGRE is not set 413# CONFIG_NET_IPGRE is not set
411# CONFIG_ARPD is not set 414# CONFIG_ARPD is not set
412CONFIG_SYN_COOKIES=y 415# CONFIG_SYN_COOKIES is not set
413# CONFIG_INET_AH is not set 416# CONFIG_INET_AH is not set
414# CONFIG_INET_ESP is not set 417# CONFIG_INET_ESP is not set
415# CONFIG_INET_IPCOMP is not set 418# CONFIG_INET_IPCOMP is not set
@@ -639,9 +642,42 @@ CONFIG_HAVE_IDE=y
639# SCSI device support 642# SCSI device support
640# 643#
641# CONFIG_RAID_ATTRS is not set 644# CONFIG_RAID_ATTRS is not set
642# CONFIG_SCSI is not set 645CONFIG_SCSI=y
643# CONFIG_SCSI_DMA is not set 646CONFIG_SCSI_DMA=y
647# CONFIG_SCSI_TGT is not set
644# CONFIG_SCSI_NETLINK is not set 648# CONFIG_SCSI_NETLINK is not set
649# CONFIG_SCSI_PROC_FS is not set
650
651#
652# SCSI support type (disk, tape, CD-ROM)
653#
654CONFIG_BLK_DEV_SD=y
655# CONFIG_CHR_DEV_ST is not set
656# CONFIG_CHR_DEV_OSST is not set
657CONFIG_BLK_DEV_SR=m
658# CONFIG_BLK_DEV_SR_VENDOR is not set
659# CONFIG_CHR_DEV_SG is not set
660# CONFIG_CHR_DEV_SCH is not set
661
662#
663# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
664#
665# CONFIG_SCSI_MULTI_LUN is not set
666# CONFIG_SCSI_CONSTANTS is not set
667# CONFIG_SCSI_LOGGING is not set
668# CONFIG_SCSI_SCAN_ASYNC is not set
669CONFIG_SCSI_WAIT_SCAN=m
670
671#
672# SCSI Transports
673#
674# CONFIG_SCSI_SPI_ATTRS is not set
675# CONFIG_SCSI_FC_ATTRS is not set
676# CONFIG_SCSI_ISCSI_ATTRS is not set
677# CONFIG_SCSI_SAS_LIBSAS is not set
678# CONFIG_SCSI_SRP_ATTRS is not set
679# CONFIG_SCSI_LOWLEVEL is not set
680# CONFIG_SCSI_DH is not set
645# CONFIG_ATA is not set 681# CONFIG_ATA is not set
646# CONFIG_MD is not set 682# CONFIG_MD is not set
647CONFIG_NETDEVICES=y 683CONFIG_NETDEVICES=y
@@ -687,9 +723,8 @@ CONFIG_BFIN_MAC_RMII=y
687# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 723# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
688# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 724# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
689# CONFIG_B44 is not set 725# CONFIG_B44 is not set
690CONFIG_NETDEV_1000=y 726# CONFIG_NETDEV_1000 is not set
691# CONFIG_AX88180 is not set 727# CONFIG_NETDEV_10000 is not set
692CONFIG_NETDEV_10000=y
693 728
694# 729#
695# Wireless LAN 730# Wireless LAN
@@ -758,14 +793,14 @@ CONFIG_INPUT_MISC=y
758# Character devices 793# Character devices
759# 794#
760# CONFIG_AD9960 is not set 795# CONFIG_AD9960 is not set
761# CONFIG_SPI_ADC_BF533 is not set 796CONFIG_BFIN_DMA_INTERFACE=m
762# CONFIG_BF5xx_PPIFCD is not set 797# CONFIG_BFIN_PPI is not set
798# CONFIG_BFIN_PPIFCD is not set
763# CONFIG_BFIN_SIMPLE_TIMER is not set 799# CONFIG_BFIN_SIMPLE_TIMER is not set
764CONFIG_BF5xx_PPI=m 800# CONFIG_BFIN_SPI_ADC is not set
765CONFIG_BFIN_SPORT=m 801CONFIG_BFIN_SPORT=m
766# CONFIG_BFIN_TIMER_LATENCY is not set 802# CONFIG_BFIN_TIMER_LATENCY is not set
767# CONFIG_TWI_LCD is not set 803# CONFIG_BFIN_TWI_LCD is not set
768CONFIG_BFIN_DMA_INTERFACE=m
769CONFIG_SIMPLE_GPIO=m 804CONFIG_SIMPLE_GPIO=m
770CONFIG_VT=y 805CONFIG_VT=y
771CONFIG_CONSOLE_TRANSLATIONS=y 806CONFIG_CONSOLE_TRANSLATIONS=y
@@ -875,7 +910,30 @@ CONFIG_SPI_BFIN=y
875# CONFIG_SPI_SPIDEV is not set 910# CONFIG_SPI_SPIDEV is not set
876# CONFIG_SPI_TLE62X0 is not set 911# CONFIG_SPI_TLE62X0 is not set
877CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 912CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
878# CONFIG_GPIOLIB is not set 913CONFIG_GPIOLIB=y
914# CONFIG_DEBUG_GPIO is not set
915CONFIG_GPIO_SYSFS=y
916
917#
918# Memory mapped GPIO expanders:
919#
920
921#
922# I2C GPIO expanders:
923#
924# CONFIG_GPIO_MAX732X is not set
925# CONFIG_GPIO_PCA953X is not set
926# CONFIG_GPIO_PCF857X is not set
927
928#
929# PCI GPIO expanders:
930#
931
932#
933# SPI GPIO expanders:
934#
935# CONFIG_GPIO_MAX7301 is not set
936# CONFIG_GPIO_MCP23S08 is not set
879# CONFIG_W1 is not set 937# CONFIG_W1 is not set
880# CONFIG_POWER_SUPPLY is not set 938# CONFIG_POWER_SUPPLY is not set
881# CONFIG_HWMON is not set 939# CONFIG_HWMON is not set
@@ -909,6 +967,7 @@ CONFIG_SSB_POSSIBLE=y
909# CONFIG_HTC_PASIC3 is not set 967# CONFIG_HTC_PASIC3 is not set
910# CONFIG_MFD_TMIO is not set 968# CONFIG_MFD_TMIO is not set
911# CONFIG_PMIC_DA903X is not set 969# CONFIG_PMIC_DA903X is not set
970# CONFIG_PMIC_ADP5520 is not set
912# CONFIG_MFD_WM8400 is not set 971# CONFIG_MFD_WM8400 is not set
913# CONFIG_MFD_WM8350_I2C is not set 972# CONFIG_MFD_WM8350_I2C is not set
914# CONFIG_REGULATOR is not set 973# CONFIG_REGULATOR is not set
@@ -1091,8 +1150,8 @@ CONFIG_USB=y
1091# 1150#
1092# Miscellaneous USB options 1151# Miscellaneous USB options
1093# 1152#
1094# CONFIG_USB_DEVICEFS is not set 1153CONFIG_USB_DEVICEFS=y
1095CONFIG_USB_DEVICE_CLASS=y 1154# CONFIG_USB_DEVICE_CLASS is not set
1096# CONFIG_USB_DYNAMIC_MINORS is not set 1155# CONFIG_USB_DYNAMIC_MINORS is not set
1097# CONFIG_USB_OTG is not set 1156# CONFIG_USB_OTG is not set
1098# CONFIG_USB_OTG_WHITELIST is not set 1157# CONFIG_USB_OTG_WHITELIST is not set
@@ -1120,10 +1179,10 @@ CONFIG_USB_MUSB_SOC=y
1120CONFIG_USB_MUSB_HOST=y 1179CONFIG_USB_MUSB_HOST=y
1121# CONFIG_USB_MUSB_PERIPHERAL is not set 1180# CONFIG_USB_MUSB_PERIPHERAL is not set
1122# CONFIG_USB_MUSB_OTG is not set 1181# CONFIG_USB_MUSB_OTG is not set
1123# CONFIG_USB_GADGET_MUSB_HDRC is not set
1124CONFIG_USB_MUSB_HDRC_HCD=y 1182CONFIG_USB_MUSB_HDRC_HCD=y
1125CONFIG_MUSB_PIO_ONLY=y 1183# CONFIG_MUSB_PIO_ONLY is not set
1126CONFIG_MUSB_DMA_POLL=y 1184CONFIG_USB_INVENTRA_DMA=y
1185# CONFIG_USB_TI_CPPI_DMA is not set
1127# CONFIG_USB_MUSB_DEBUG is not set 1186# CONFIG_USB_MUSB_DEBUG is not set
1128 1187
1129# 1188#
@@ -1141,7 +1200,7 @@ CONFIG_MUSB_DMA_POLL=y
1141# 1200#
1142# see USB_STORAGE Help for more information 1201# see USB_STORAGE Help for more information
1143# 1202#
1144CONFIG_USB_STORAGE=m 1203CONFIG_USB_STORAGE=y
1145# CONFIG_USB_STORAGE_DEBUG is not set 1204# CONFIG_USB_STORAGE_DEBUG is not set
1146# CONFIG_USB_STORAGE_DATAFAB is not set 1205# CONFIG_USB_STORAGE_DATAFAB is not set
1147# CONFIG_USB_STORAGE_FREECOM is not set 1206# CONFIG_USB_STORAGE_FREECOM is not set
@@ -1190,33 +1249,10 @@ CONFIG_USB_STORAGE=m
1190# CONFIG_USB_LD is not set 1249# CONFIG_USB_LD is not set
1191# CONFIG_USB_TRANCEVIBRATOR is not set 1250# CONFIG_USB_TRANCEVIBRATOR is not set
1192# CONFIG_USB_IOWARRIOR is not set 1251# CONFIG_USB_IOWARRIOR is not set
1252# CONFIG_USB_TEST is not set
1193# CONFIG_USB_ISIGHTFW is not set 1253# CONFIG_USB_ISIGHTFW is not set
1194# CONFIG_USB_VST is not set 1254# CONFIG_USB_VST is not set
1195# CONFIG_USB_GADGET is not set 1255# CONFIG_USB_GADGET is not set
1196# CONFIG_USB_GADGET_AT91 is not set
1197# CONFIG_USB_GADGET_ATMEL_USBA is not set
1198# CONFIG_USB_GADGET_FSL_USB2 is not set
1199# CONFIG_USB_GADGET_LH7A40X is not set
1200# CONFIG_USB_GADGET_OMAP is not set
1201# CONFIG_USB_GADGET_PXA25X is not set
1202# CONFIG_USB_GADGET_PXA27X is not set
1203# CONFIG_USB_GADGET_S3C2410 is not set
1204# CONFIG_USB_GADGET_M66592 is not set
1205# CONFIG_USB_GADGET_AMD5536UDC is not set
1206# CONFIG_USB_GADGET_FSL_QE is not set
1207# CONFIG_USB_GADGET_NET2272 is not set
1208# CONFIG_USB_GADGET_NET2280 is not set
1209# CONFIG_USB_GADGET_GOKU is not set
1210# CONFIG_USB_GADGET_DUMMY_HCD is not set
1211# CONFIG_USB_ZERO is not set
1212# CONFIG_USB_AUDIO is not set
1213# CONFIG_USB_ETH is not set
1214# CONFIG_USB_GADGETFS is not set
1215# CONFIG_USB_FILE_STORAGE is not set
1216# CONFIG_USB_G_SERIAL is not set
1217# CONFIG_USB_MIDI_GADGET is not set
1218# CONFIG_USB_G_PRINTER is not set
1219# CONFIG_USB_CDC_COMPOSITE is not set
1220# CONFIG_MMC is not set 1256# CONFIG_MMC is not set
1221# CONFIG_MEMSTICK is not set 1257# CONFIG_MEMSTICK is not set
1222# CONFIG_NEW_LEDS is not set 1258# CONFIG_NEW_LEDS is not set
@@ -1289,7 +1325,8 @@ CONFIG_RTC_DRV_BFIN=y
1289# 1325#
1290# File systems 1326# File systems
1291# 1327#
1292# CONFIG_EXT2_FS is not set 1328CONFIG_EXT2_FS=m
1329# CONFIG_EXT2_FS_XATTR is not set
1293# CONFIG_EXT3_FS is not set 1330# CONFIG_EXT3_FS is not set
1294# CONFIG_EXT4_FS is not set 1331# CONFIG_EXT4_FS is not set
1295# CONFIG_REISERFS_FS is not set 1332# CONFIG_REISERFS_FS is not set
@@ -1309,14 +1346,20 @@ CONFIG_INOTIFY_USER=y
1309# 1346#
1310# CD-ROM/DVD Filesystems 1347# CD-ROM/DVD Filesystems
1311# 1348#
1312# CONFIG_ISO9660_FS is not set 1349CONFIG_ISO9660_FS=m
1313# CONFIG_UDF_FS is not set 1350CONFIG_JOLIET=y
1351# CONFIG_ZISOFS is not set
1352CONFIG_UDF_FS=m
1353CONFIG_UDF_NLS=y
1314 1354
1315# 1355#
1316# DOS/FAT/NT Filesystems 1356# DOS/FAT/NT Filesystems
1317# 1357#
1358CONFIG_FAT_FS=m
1318# CONFIG_MSDOS_FS is not set 1359# CONFIG_MSDOS_FS is not set
1319# CONFIG_VFAT_FS is not set 1360CONFIG_VFAT_FS=m
1361CONFIG_FAT_DEFAULT_CODEPAGE=437
1362CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1320# CONFIG_NTFS_FS is not set 1363# CONFIG_NTFS_FS is not set
1321 1364
1322# 1365#
@@ -1339,16 +1382,6 @@ CONFIG_SYSFS=y
1339# CONFIG_BEFS_FS is not set 1382# CONFIG_BEFS_FS is not set
1340# CONFIG_BFS_FS is not set 1383# CONFIG_BFS_FS is not set
1341# CONFIG_EFS_FS is not set 1384# CONFIG_EFS_FS is not set
1342CONFIG_YAFFS_FS=m
1343CONFIG_YAFFS_YAFFS1=y
1344# CONFIG_YAFFS_9BYTE_TAGS is not set
1345# CONFIG_YAFFS_DOES_ECC is not set
1346CONFIG_YAFFS_YAFFS2=y
1347CONFIG_YAFFS_AUTO_YAFFS2=y
1348# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1349# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1350# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1351CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1352CONFIG_JFFS2_FS=m 1385CONFIG_JFFS2_FS=m
1353CONFIG_JFFS2_FS_DEBUG=0 1386CONFIG_JFFS2_FS_DEBUG=0
1354CONFIG_JFFS2_FS_WRITEBUFFER=y 1387CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1360,6 +1393,16 @@ CONFIG_JFFS2_ZLIB=y
1360# CONFIG_JFFS2_LZO is not set 1393# CONFIG_JFFS2_LZO is not set
1361CONFIG_JFFS2_RTIME=y 1394CONFIG_JFFS2_RTIME=y
1362# CONFIG_JFFS2_RUBIN is not set 1395# CONFIG_JFFS2_RUBIN is not set
1396CONFIG_YAFFS_FS=m
1397CONFIG_YAFFS_YAFFS1=y
1398# CONFIG_YAFFS_9BYTE_TAGS is not set
1399# CONFIG_YAFFS_DOES_ECC is not set
1400CONFIG_YAFFS_YAFFS2=y
1401CONFIG_YAFFS_AUTO_YAFFS2=y
1402# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1403# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1404# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1405CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1363# CONFIG_CRAMFS is not set 1406# CONFIG_CRAMFS is not set
1364# CONFIG_VXFS_FS is not set 1407# CONFIG_VXFS_FS is not set
1365# CONFIG_MINIX_FS is not set 1408# CONFIG_MINIX_FS is not set
@@ -1396,7 +1439,7 @@ CONFIG_SMB_FS=m
1396CONFIG_MSDOS_PARTITION=y 1439CONFIG_MSDOS_PARTITION=y
1397CONFIG_NLS=m 1440CONFIG_NLS=m
1398CONFIG_NLS_DEFAULT="iso8859-1" 1441CONFIG_NLS_DEFAULT="iso8859-1"
1399# CONFIG_NLS_CODEPAGE_437 is not set 1442CONFIG_NLS_CODEPAGE_437=m
1400# CONFIG_NLS_CODEPAGE_737 is not set 1443# CONFIG_NLS_CODEPAGE_737 is not set
1401# CONFIG_NLS_CODEPAGE_775 is not set 1444# CONFIG_NLS_CODEPAGE_775 is not set
1402# CONFIG_NLS_CODEPAGE_850 is not set 1445# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1411,7 +1454,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1411# CONFIG_NLS_CODEPAGE_865 is not set 1454# CONFIG_NLS_CODEPAGE_865 is not set
1412# CONFIG_NLS_CODEPAGE_866 is not set 1455# CONFIG_NLS_CODEPAGE_866 is not set
1413# CONFIG_NLS_CODEPAGE_869 is not set 1456# CONFIG_NLS_CODEPAGE_869 is not set
1414# CONFIG_NLS_CODEPAGE_936 is not set 1457CONFIG_NLS_CODEPAGE_936=m
1415# CONFIG_NLS_CODEPAGE_950 is not set 1458# CONFIG_NLS_CODEPAGE_950 is not set
1416# CONFIG_NLS_CODEPAGE_932 is not set 1459# CONFIG_NLS_CODEPAGE_932 is not set
1417# CONFIG_NLS_CODEPAGE_949 is not set 1460# CONFIG_NLS_CODEPAGE_949 is not set
@@ -1420,7 +1463,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1420# CONFIG_NLS_CODEPAGE_1250 is not set 1463# CONFIG_NLS_CODEPAGE_1250 is not set
1421# CONFIG_NLS_CODEPAGE_1251 is not set 1464# CONFIG_NLS_CODEPAGE_1251 is not set
1422# CONFIG_NLS_ASCII is not set 1465# CONFIG_NLS_ASCII is not set
1423# CONFIG_NLS_ISO8859_1 is not set 1466CONFIG_NLS_ISO8859_1=m
1424# CONFIG_NLS_ISO8859_2 is not set 1467# CONFIG_NLS_ISO8859_2 is not set
1425# CONFIG_NLS_ISO8859_3 is not set 1468# CONFIG_NLS_ISO8859_3 is not set
1426# CONFIG_NLS_ISO8859_4 is not set 1469# CONFIG_NLS_ISO8859_4 is not set
@@ -1433,7 +1476,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
1433# CONFIG_NLS_ISO8859_15 is not set 1476# CONFIG_NLS_ISO8859_15 is not set
1434# CONFIG_NLS_KOI8_R is not set 1477# CONFIG_NLS_KOI8_R is not set
1435# CONFIG_NLS_KOI8_U is not set 1478# CONFIG_NLS_KOI8_U is not set
1436# CONFIG_NLS_UTF8 is not set 1479CONFIG_NLS_UTF8=m
1437# CONFIG_DLM is not set 1480# CONFIG_DLM is not set
1438 1481
1439# 1482#
@@ -1448,7 +1491,7 @@ CONFIG_FRAME_WARN=1024
1448CONFIG_DEBUG_FS=y 1491CONFIG_DEBUG_FS=y
1449# CONFIG_HEADERS_CHECK is not set 1492# CONFIG_HEADERS_CHECK is not set
1450CONFIG_DEBUG_KERNEL=y 1493CONFIG_DEBUG_KERNEL=y
1451# CONFIG_DEBUG_SHIRQ is not set 1494CONFIG_DEBUG_SHIRQ=y
1452CONFIG_DETECT_SOFTLOCKUP=y 1495CONFIG_DETECT_SOFTLOCKUP=y
1453# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1496# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1454CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1497CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1457,8 +1500,6 @@ CONFIG_SCHED_DEBUG=y
1457# CONFIG_TIMER_STATS is not set 1500# CONFIG_TIMER_STATS is not set
1458# CONFIG_DEBUG_OBJECTS is not set 1501# CONFIG_DEBUG_OBJECTS is not set
1459# CONFIG_DEBUG_SLAB is not set 1502# CONFIG_DEBUG_SLAB is not set
1460# CONFIG_DEBUG_RT_MUTEXES is not set
1461# CONFIG_RT_MUTEX_TESTER is not set
1462# CONFIG_DEBUG_SPINLOCK is not set 1503# CONFIG_DEBUG_SPINLOCK is not set
1463# CONFIG_DEBUG_MUTEXES is not set 1504# CONFIG_DEBUG_MUTEXES is not set
1464# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1505# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1478,7 +1519,6 @@ CONFIG_DEBUG_INFO=y
1478# CONFIG_BACKTRACE_SELF_TEST is not set 1519# CONFIG_BACKTRACE_SELF_TEST is not set
1479# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1520# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1480# CONFIG_FAULT_INJECTION is not set 1521# CONFIG_FAULT_INJECTION is not set
1481# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1482 1522
1483# 1523#
1484# Tracers 1524# Tracers
@@ -1494,16 +1534,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1494# CONFIG_DEBUG_STACK_USAGE is not set 1534# CONFIG_DEBUG_STACK_USAGE is not set
1495CONFIG_DEBUG_VERBOSE=y 1535CONFIG_DEBUG_VERBOSE=y
1496CONFIG_DEBUG_MMRS=y 1536CONFIG_DEBUG_MMRS=y
1497# CONFIG_DEBUG_HWERR is not set 1537CONFIG_DEBUG_HWERR=y
1498# CONFIG_DEBUG_DOUBLEFAULT is not set 1538CONFIG_EXACT_HWERR=y
1539CONFIG_DEBUG_DOUBLEFAULT=y
1540CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1541# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1542# CONFIG_DEBUG_ICACHE_CHECK is not set
1499CONFIG_DEBUG_HUNT_FOR_ZERO=y 1543CONFIG_DEBUG_HUNT_FOR_ZERO=y
1500CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1544CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1501CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1545# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1502# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1546CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1503# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1547# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1504CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1548CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1505# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1549# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1506# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1550CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1507CONFIG_EARLY_PRINTK=y 1551CONFIG_EARLY_PRINTK=y
1508CONFIG_CPLB_INFO=y 1552CONFIG_CPLB_INFO=y
1509CONFIG_ACCESS_CHECK=y 1553CONFIG_ACCESS_CHECK=y
@@ -1611,13 +1655,12 @@ CONFIG_BITREVERSE=y
1611CONFIG_CRC_CCITT=m 1655CONFIG_CRC_CCITT=m
1612# CONFIG_CRC16 is not set 1656# CONFIG_CRC16 is not set
1613# CONFIG_CRC_T10DIF is not set 1657# CONFIG_CRC_T10DIF is not set
1614# CONFIG_CRC_ITU_T is not set 1658CONFIG_CRC_ITU_T=m
1615CONFIG_CRC32=y 1659CONFIG_CRC32=y
1616# CONFIG_CRC7 is not set 1660# CONFIG_CRC7 is not set
1617# CONFIG_LIBCRC32C is not set 1661# CONFIG_LIBCRC32C is not set
1618CONFIG_ZLIB_INFLATE=y 1662CONFIG_ZLIB_INFLATE=y
1619CONFIG_ZLIB_DEFLATE=m 1663CONFIG_ZLIB_DEFLATE=m
1620CONFIG_PLIST=y
1621CONFIG_HAS_IOMEM=y 1664CONFIG_HAS_IOMEM=y
1622CONFIG_HAS_IOPORT=y 1665CONFIG_HAS_IOPORT=y
1623CONFIG_HAS_DMA=y 1666CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 4c41e03efe0f..5aa63bafdd62 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -225,7 +225,10 @@ CONFIG_HZ=250
225CONFIG_SCHED_HRTICK=y 225CONFIG_SCHED_HRTICK=y
226CONFIG_GENERIC_TIME=y 226CONFIG_GENERIC_TIME=y
227CONFIG_GENERIC_CLOCKEVENTS=y 227CONFIG_GENERIC_CLOCKEVENTS=y
228# CONFIG_TICKSOURCE_GPTMR0 is not set
229CONFIG_TICKSOURCE_CORETMR=y
228# CONFIG_CYCLES_CLOCKSOURCE is not set 230# CONFIG_CYCLES_CLOCKSOURCE is not set
231# CONFIG_GPTMR0_CLOCKSOURCE is not set
229CONFIG_TICK_ONESHOT=y 232CONFIG_TICK_ONESHOT=y
230# CONFIG_NO_HZ is not set 233# CONFIG_NO_HZ is not set
231CONFIG_HIGH_RES_TIMERS=y 234CONFIG_HIGH_RES_TIMERS=y
@@ -382,7 +385,7 @@ CONFIG_IP_PNP=y
382# CONFIG_NET_IPIP is not set 385# CONFIG_NET_IPIP is not set
383# CONFIG_NET_IPGRE is not set 386# CONFIG_NET_IPGRE is not set
384# CONFIG_ARPD is not set 387# CONFIG_ARPD is not set
385CONFIG_SYN_COOKIES=y 388# CONFIG_SYN_COOKIES is not set
386# CONFIG_INET_AH is not set 389# CONFIG_INET_AH is not set
387# CONFIG_INET_ESP is not set 390# CONFIG_INET_ESP is not set
388# CONFIG_INET_IPCOMP is not set 391# CONFIG_INET_IPCOMP is not set
@@ -613,9 +616,8 @@ CONFIG_SMC91X=y
613# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 616# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
614# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 617# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
615# CONFIG_B44 is not set 618# CONFIG_B44 is not set
616CONFIG_NETDEV_1000=y 619# CONFIG_NETDEV_1000 is not set
617# CONFIG_AX88180 is not set 620# CONFIG_NETDEV_10000 is not set
618CONFIG_NETDEV_10000=y
619 621
620# 622#
621# Wireless LAN 623# Wireless LAN
@@ -667,13 +669,13 @@ CONFIG_INPUT_EVDEV=m
667# Character devices 669# Character devices
668# 670#
669# CONFIG_AD9960 is not set 671# CONFIG_AD9960 is not set
670# CONFIG_SPI_ADC_BF533 is not set 672CONFIG_BFIN_DMA_INTERFACE=m
671# CONFIG_BF5xx_PPIFCD is not set 673# CONFIG_BFIN_PPI is not set
674# CONFIG_BFIN_PPIFCD is not set
672# CONFIG_BFIN_SIMPLE_TIMER is not set 675# CONFIG_BFIN_SIMPLE_TIMER is not set
673# CONFIG_BF5xx_PPI is not set 676# CONFIG_BFIN_SPI_ADC is not set
674CONFIG_BFIN_SPORT=y 677CONFIG_BFIN_SPORT=y
675# CONFIG_BFIN_TIMER_LATENCY is not set 678# CONFIG_BFIN_TIMER_LATENCY is not set
676CONFIG_BFIN_DMA_INTERFACE=m
677CONFIG_SIMPLE_GPIO=m 679CONFIG_SIMPLE_GPIO=m
678# CONFIG_VT is not set 680# CONFIG_VT is not set
679# CONFIG_DEVKMEM is not set 681# CONFIG_DEVKMEM is not set
@@ -729,7 +731,30 @@ CONFIG_SPI_BFIN=y
729# CONFIG_SPI_SPIDEV is not set 731# CONFIG_SPI_SPIDEV is not set
730# CONFIG_SPI_TLE62X0 is not set 732# CONFIG_SPI_TLE62X0 is not set
731CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 733CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
732# CONFIG_GPIOLIB is not set 734CONFIG_GPIOLIB=y
735# CONFIG_DEBUG_GPIO is not set
736CONFIG_GPIO_SYSFS=y
737
738#
739# Memory mapped GPIO expanders:
740#
741
742#
743# I2C GPIO expanders:
744#
745# CONFIG_GPIO_MAX732X is not set
746# CONFIG_GPIO_PCA953X is not set
747# CONFIG_GPIO_PCF857X is not set
748
749#
750# PCI GPIO expanders:
751#
752
753#
754# SPI GPIO expanders:
755#
756# CONFIG_GPIO_MAX7301 is not set
757# CONFIG_GPIO_MCP23S08 is not set
733# CONFIG_W1 is not set 758# CONFIG_W1 is not set
734# CONFIG_POWER_SUPPLY is not set 759# CONFIG_POWER_SUPPLY is not set
735# CONFIG_HWMON is not set 760# CONFIG_HWMON is not set
@@ -904,16 +929,6 @@ CONFIG_SYSFS=y
904# CONFIG_BEFS_FS is not set 929# CONFIG_BEFS_FS is not set
905# CONFIG_BFS_FS is not set 930# CONFIG_BFS_FS is not set
906# CONFIG_EFS_FS is not set 931# CONFIG_EFS_FS is not set
907CONFIG_YAFFS_FS=m
908CONFIG_YAFFS_YAFFS1=y
909# CONFIG_YAFFS_9BYTE_TAGS is not set
910# CONFIG_YAFFS_DOES_ECC is not set
911CONFIG_YAFFS_YAFFS2=y
912CONFIG_YAFFS_AUTO_YAFFS2=y
913# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
914# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
915# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
916CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
917CONFIG_JFFS2_FS=m 932CONFIG_JFFS2_FS=m
918CONFIG_JFFS2_FS_DEBUG=0 933CONFIG_JFFS2_FS_DEBUG=0
919CONFIG_JFFS2_FS_WRITEBUFFER=y 934CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -925,6 +940,16 @@ CONFIG_JFFS2_ZLIB=y
925# CONFIG_JFFS2_LZO is not set 940# CONFIG_JFFS2_LZO is not set
926CONFIG_JFFS2_RTIME=y 941CONFIG_JFFS2_RTIME=y
927# CONFIG_JFFS2_RUBIN is not set 942# CONFIG_JFFS2_RUBIN is not set
943CONFIG_YAFFS_FS=m
944CONFIG_YAFFS_YAFFS1=y
945# CONFIG_YAFFS_9BYTE_TAGS is not set
946# CONFIG_YAFFS_DOES_ECC is not set
947CONFIG_YAFFS_YAFFS2=y
948CONFIG_YAFFS_AUTO_YAFFS2=y
949# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
950# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
951# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
952CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
928# CONFIG_CRAMFS is not set 953# CONFIG_CRAMFS is not set
929# CONFIG_VXFS_FS is not set 954# CONFIG_VXFS_FS is not set
930# CONFIG_MINIX_FS is not set 955# CONFIG_MINIX_FS is not set
@@ -1013,7 +1038,7 @@ CONFIG_FRAME_WARN=1024
1013CONFIG_DEBUG_FS=y 1038CONFIG_DEBUG_FS=y
1014# CONFIG_HEADERS_CHECK is not set 1039# CONFIG_HEADERS_CHECK is not set
1015CONFIG_DEBUG_KERNEL=y 1040CONFIG_DEBUG_KERNEL=y
1016# CONFIG_DEBUG_SHIRQ is not set 1041CONFIG_DEBUG_SHIRQ=y
1017CONFIG_DETECT_SOFTLOCKUP=y 1042CONFIG_DETECT_SOFTLOCKUP=y
1018# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1043# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1019CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1044CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1022,8 +1047,6 @@ CONFIG_SCHED_DEBUG=y
1022# CONFIG_TIMER_STATS is not set 1047# CONFIG_TIMER_STATS is not set
1023# CONFIG_DEBUG_OBJECTS is not set 1048# CONFIG_DEBUG_OBJECTS is not set
1024# CONFIG_DEBUG_SLAB is not set 1049# CONFIG_DEBUG_SLAB is not set
1025# CONFIG_DEBUG_RT_MUTEXES is not set
1026# CONFIG_RT_MUTEX_TESTER is not set
1027# CONFIG_DEBUG_SPINLOCK is not set 1050# CONFIG_DEBUG_SPINLOCK is not set
1028# CONFIG_DEBUG_MUTEXES is not set 1051# CONFIG_DEBUG_MUTEXES is not set
1029# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1052# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1043,7 +1066,6 @@ CONFIG_DEBUG_INFO=y
1043# CONFIG_BACKTRACE_SELF_TEST is not set 1066# CONFIG_BACKTRACE_SELF_TEST is not set
1044# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1067# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1045# CONFIG_FAULT_INJECTION is not set 1068# CONFIG_FAULT_INJECTION is not set
1046# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1047 1069
1048# 1070#
1049# Tracers 1071# Tracers
@@ -1059,16 +1081,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1059# CONFIG_DEBUG_STACK_USAGE is not set 1081# CONFIG_DEBUG_STACK_USAGE is not set
1060CONFIG_DEBUG_VERBOSE=y 1082CONFIG_DEBUG_VERBOSE=y
1061CONFIG_DEBUG_MMRS=y 1083CONFIG_DEBUG_MMRS=y
1062# CONFIG_DEBUG_HWERR is not set 1084CONFIG_DEBUG_HWERR=y
1063# CONFIG_DEBUG_DOUBLEFAULT is not set 1085CONFIG_EXACT_HWERR=y
1086CONFIG_DEBUG_DOUBLEFAULT=y
1087CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1088# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1089# CONFIG_DEBUG_ICACHE_CHECK is not set
1064CONFIG_DEBUG_HUNT_FOR_ZERO=y 1090CONFIG_DEBUG_HUNT_FOR_ZERO=y
1065CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1091CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1066CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1092# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1067# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1093CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1068# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1094# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1069CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1095CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1070# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1096# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1071# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1097CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1072CONFIG_EARLY_PRINTK=y 1098CONFIG_EARLY_PRINTK=y
1073CONFIG_CPLB_INFO=y 1099CONFIG_CPLB_INFO=y
1074CONFIG_ACCESS_CHECK=y 1100CONFIG_ACCESS_CHECK=y
@@ -1181,7 +1207,6 @@ CONFIG_CRC32=y
1181# CONFIG_LIBCRC32C is not set 1207# CONFIG_LIBCRC32C is not set
1182CONFIG_ZLIB_INFLATE=y 1208CONFIG_ZLIB_INFLATE=y
1183CONFIG_ZLIB_DEFLATE=m 1209CONFIG_ZLIB_DEFLATE=m
1184CONFIG_PLIST=y
1185CONFIG_HAS_IOMEM=y 1210CONFIG_HAS_IOMEM=y
1186CONFIG_HAS_IOPORT=y 1211CONFIG_HAS_IOPORT=y
1187CONFIG_HAS_DMA=y 1212CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 9c482cd1b343..fed25329e13c 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -225,7 +225,10 @@ CONFIG_HZ=250
225CONFIG_SCHED_HRTICK=y 225CONFIG_SCHED_HRTICK=y
226CONFIG_GENERIC_TIME=y 226CONFIG_GENERIC_TIME=y
227CONFIG_GENERIC_CLOCKEVENTS=y 227CONFIG_GENERIC_CLOCKEVENTS=y
228# CONFIG_TICKSOURCE_GPTMR0 is not set
229CONFIG_TICKSOURCE_CORETMR=y
228# CONFIG_CYCLES_CLOCKSOURCE is not set 230# CONFIG_CYCLES_CLOCKSOURCE is not set
231# CONFIG_GPTMR0_CLOCKSOURCE is not set
229CONFIG_TICK_ONESHOT=y 232CONFIG_TICK_ONESHOT=y
230# CONFIG_NO_HZ is not set 233# CONFIG_NO_HZ is not set
231CONFIG_HIGH_RES_TIMERS=y 234CONFIG_HIGH_RES_TIMERS=y
@@ -382,7 +385,7 @@ CONFIG_IP_PNP=y
382# CONFIG_NET_IPIP is not set 385# CONFIG_NET_IPIP is not set
383# CONFIG_NET_IPGRE is not set 386# CONFIG_NET_IPGRE is not set
384# CONFIG_ARPD is not set 387# CONFIG_ARPD is not set
385CONFIG_SYN_COOKIES=y 388# CONFIG_SYN_COOKIES is not set
386# CONFIG_INET_AH is not set 389# CONFIG_INET_AH is not set
387# CONFIG_INET_ESP is not set 390# CONFIG_INET_ESP is not set
388# CONFIG_INET_IPCOMP is not set 391# CONFIG_INET_IPCOMP is not set
@@ -618,9 +621,8 @@ CONFIG_SMC91X=y
618# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 621# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
619# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 622# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
620# CONFIG_B44 is not set 623# CONFIG_B44 is not set
621CONFIG_NETDEV_1000=y 624# CONFIG_NETDEV_1000 is not set
622# CONFIG_AX88180 is not set 625# CONFIG_NETDEV_10000 is not set
623CONFIG_NETDEV_10000=y
624 626
625# 627#
626# Wireless LAN 628# Wireless LAN
@@ -674,14 +676,14 @@ CONFIG_CONFIG_INPUT_PCF8574=m
674# Character devices 676# Character devices
675# 677#
676# CONFIG_AD9960 is not set 678# CONFIG_AD9960 is not set
677# CONFIG_SPI_ADC_BF533 is not set 679CONFIG_BFIN_DMA_INTERFACE=m
678# CONFIG_BF5xx_PPIFCD is not set 680# CONFIG_BFIN_PPI is not set
681# CONFIG_BFIN_PPIFCD is not set
679# CONFIG_BFIN_SIMPLE_TIMER is not set 682# CONFIG_BFIN_SIMPLE_TIMER is not set
680CONFIG_BF5xx_PPI=m 683# CONFIG_BFIN_SPI_ADC is not set
681CONFIG_BFIN_SPORT=m 684CONFIG_BFIN_SPORT=m
682# CONFIG_BFIN_TIMER_LATENCY is not set 685# CONFIG_BFIN_TIMER_LATENCY is not set
683# CONFIG_TWI_LCD is not set 686# CONFIG_BFIN_TWI_LCD is not set
684CONFIG_BFIN_DMA_INTERFACE=m
685CONFIG_SIMPLE_GPIO=m 687CONFIG_SIMPLE_GPIO=m
686# CONFIG_VT is not set 688# CONFIG_VT is not set
687# CONFIG_DEVKMEM is not set 689# CONFIG_DEVKMEM is not set
@@ -781,7 +783,30 @@ CONFIG_SPI_BFIN=y
781# CONFIG_SPI_SPIDEV is not set 783# CONFIG_SPI_SPIDEV is not set
782# CONFIG_SPI_TLE62X0 is not set 784# CONFIG_SPI_TLE62X0 is not set
783CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 785CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
784# CONFIG_GPIOLIB is not set 786CONFIG_GPIOLIB=y
787# CONFIG_DEBUG_GPIO is not set
788CONFIG_GPIO_SYSFS=y
789
790#
791# Memory mapped GPIO expanders:
792#
793
794#
795# I2C GPIO expanders:
796#
797# CONFIG_GPIO_MAX732X is not set
798# CONFIG_GPIO_PCA953X is not set
799# CONFIG_GPIO_PCF857X is not set
800
801#
802# PCI GPIO expanders:
803#
804
805#
806# SPI GPIO expanders:
807#
808# CONFIG_GPIO_MAX7301 is not set
809# CONFIG_GPIO_MCP23S08 is not set
785# CONFIG_W1 is not set 810# CONFIG_W1 is not set
786# CONFIG_POWER_SUPPLY is not set 811# CONFIG_POWER_SUPPLY is not set
787# CONFIG_HWMON is not set 812# CONFIG_HWMON is not set
@@ -1068,16 +1093,6 @@ CONFIG_SYSFS=y
1068# CONFIG_BEFS_FS is not set 1093# CONFIG_BEFS_FS is not set
1069# CONFIG_BFS_FS is not set 1094# CONFIG_BFS_FS is not set
1070# CONFIG_EFS_FS is not set 1095# CONFIG_EFS_FS is not set
1071CONFIG_YAFFS_FS=m
1072CONFIG_YAFFS_YAFFS1=y
1073# CONFIG_YAFFS_9BYTE_TAGS is not set
1074# CONFIG_YAFFS_DOES_ECC is not set
1075CONFIG_YAFFS_YAFFS2=y
1076CONFIG_YAFFS_AUTO_YAFFS2=y
1077# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1078# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1079# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1080CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1081CONFIG_JFFS2_FS=m 1096CONFIG_JFFS2_FS=m
1082CONFIG_JFFS2_FS_DEBUG=0 1097CONFIG_JFFS2_FS_DEBUG=0
1083CONFIG_JFFS2_FS_WRITEBUFFER=y 1098CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1089,6 +1104,16 @@ CONFIG_JFFS2_ZLIB=y
1089# CONFIG_JFFS2_LZO is not set 1104# CONFIG_JFFS2_LZO is not set
1090CONFIG_JFFS2_RTIME=y 1105CONFIG_JFFS2_RTIME=y
1091# CONFIG_JFFS2_RUBIN is not set 1106# CONFIG_JFFS2_RUBIN is not set
1107CONFIG_YAFFS_FS=m
1108CONFIG_YAFFS_YAFFS1=y
1109# CONFIG_YAFFS_9BYTE_TAGS is not set
1110# CONFIG_YAFFS_DOES_ECC is not set
1111CONFIG_YAFFS_YAFFS2=y
1112CONFIG_YAFFS_AUTO_YAFFS2=y
1113# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1114# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1115# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1116CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1092# CONFIG_CRAMFS is not set 1117# CONFIG_CRAMFS is not set
1093# CONFIG_VXFS_FS is not set 1118# CONFIG_VXFS_FS is not set
1094# CONFIG_MINIX_FS is not set 1119# CONFIG_MINIX_FS is not set
@@ -1177,7 +1202,7 @@ CONFIG_FRAME_WARN=1024
1177CONFIG_DEBUG_FS=y 1202CONFIG_DEBUG_FS=y
1178# CONFIG_HEADERS_CHECK is not set 1203# CONFIG_HEADERS_CHECK is not set
1179CONFIG_DEBUG_KERNEL=y 1204CONFIG_DEBUG_KERNEL=y
1180# CONFIG_DEBUG_SHIRQ is not set 1205CONFIG_DEBUG_SHIRQ=y
1181CONFIG_DETECT_SOFTLOCKUP=y 1206CONFIG_DETECT_SOFTLOCKUP=y
1182# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1207# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1183CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1208CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1186,8 +1211,6 @@ CONFIG_SCHED_DEBUG=y
1186# CONFIG_TIMER_STATS is not set 1211# CONFIG_TIMER_STATS is not set
1187# CONFIG_DEBUG_OBJECTS is not set 1212# CONFIG_DEBUG_OBJECTS is not set
1188# CONFIG_DEBUG_SLAB is not set 1213# CONFIG_DEBUG_SLAB is not set
1189# CONFIG_DEBUG_RT_MUTEXES is not set
1190# CONFIG_RT_MUTEX_TESTER is not set
1191# CONFIG_DEBUG_SPINLOCK is not set 1214# CONFIG_DEBUG_SPINLOCK is not set
1192# CONFIG_DEBUG_MUTEXES is not set 1215# CONFIG_DEBUG_MUTEXES is not set
1193# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1216# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1207,7 +1230,6 @@ CONFIG_DEBUG_INFO=y
1207# CONFIG_BACKTRACE_SELF_TEST is not set 1230# CONFIG_BACKTRACE_SELF_TEST is not set
1208# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1231# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1209# CONFIG_FAULT_INJECTION is not set 1232# CONFIG_FAULT_INJECTION is not set
1210# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1211 1233
1212# 1234#
1213# Tracers 1235# Tracers
@@ -1223,16 +1245,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1223# CONFIG_DEBUG_STACK_USAGE is not set 1245# CONFIG_DEBUG_STACK_USAGE is not set
1224CONFIG_DEBUG_VERBOSE=y 1246CONFIG_DEBUG_VERBOSE=y
1225CONFIG_DEBUG_MMRS=y 1247CONFIG_DEBUG_MMRS=y
1226# CONFIG_DEBUG_HWERR is not set 1248CONFIG_DEBUG_HWERR=y
1227# CONFIG_DEBUG_DOUBLEFAULT is not set 1249CONFIG_EXACT_HWERR=y
1250CONFIG_DEBUG_DOUBLEFAULT=y
1251CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1252# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1253# CONFIG_DEBUG_ICACHE_CHECK is not set
1228CONFIG_DEBUG_HUNT_FOR_ZERO=y 1254CONFIG_DEBUG_HUNT_FOR_ZERO=y
1229CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1255CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1230CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1256# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1231# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1257CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1232# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1258# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1233CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1259CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1234# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1260# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1235# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1261CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1236CONFIG_EARLY_PRINTK=y 1262CONFIG_EARLY_PRINTK=y
1237CONFIG_CPLB_INFO=y 1263CONFIG_CPLB_INFO=y
1238CONFIG_ACCESS_CHECK=y 1264CONFIG_ACCESS_CHECK=y
@@ -1345,7 +1371,6 @@ CONFIG_CRC32=y
1345# CONFIG_LIBCRC32C is not set 1371# CONFIG_LIBCRC32C is not set
1346CONFIG_ZLIB_INFLATE=y 1372CONFIG_ZLIB_INFLATE=y
1347CONFIG_ZLIB_DEFLATE=m 1373CONFIG_ZLIB_DEFLATE=m
1348CONFIG_PLIST=y
1349CONFIG_HAS_IOMEM=y 1374CONFIG_HAS_IOMEM=y
1350CONFIG_HAS_IOPORT=y 1375CONFIG_HAS_IOPORT=y
1351CONFIG_HAS_DMA=y 1376CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 591f6edda4f7..f9ac20d55799 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -232,7 +232,10 @@ CONFIG_HZ=250
232CONFIG_SCHED_HRTICK=y 232CONFIG_SCHED_HRTICK=y
233CONFIG_GENERIC_TIME=y 233CONFIG_GENERIC_TIME=y
234CONFIG_GENERIC_CLOCKEVENTS=y 234CONFIG_GENERIC_CLOCKEVENTS=y
235# CONFIG_TICKSOURCE_GPTMR0 is not set
236CONFIG_TICKSOURCE_CORETMR=y
235# CONFIG_CYCLES_CLOCKSOURCE is not set 237# CONFIG_CYCLES_CLOCKSOURCE is not set
238# CONFIG_GPTMR0_CLOCKSOURCE is not set
236CONFIG_TICK_ONESHOT=y 239CONFIG_TICK_ONESHOT=y
237# CONFIG_NO_HZ is not set 240# CONFIG_NO_HZ is not set
238CONFIG_HIGH_RES_TIMERS=y 241CONFIG_HIGH_RES_TIMERS=y
@@ -390,7 +393,7 @@ CONFIG_IP_PNP=y
390# CONFIG_NET_IPIP is not set 393# CONFIG_NET_IPIP is not set
391# CONFIG_NET_IPGRE is not set 394# CONFIG_NET_IPGRE is not set
392# CONFIG_ARPD is not set 395# CONFIG_ARPD is not set
393CONFIG_SYN_COOKIES=y 396# CONFIG_SYN_COOKIES is not set
394# CONFIG_INET_AH is not set 397# CONFIG_INET_AH is not set
395# CONFIG_INET_ESP is not set 398# CONFIG_INET_ESP is not set
396# CONFIG_INET_IPCOMP is not set 399# CONFIG_INET_IPCOMP is not set
@@ -548,9 +551,7 @@ CONFIG_MTD_ROM=m
548# 551#
549# CONFIG_MTD_COMPLEX_MAPPINGS is not set 552# CONFIG_MTD_COMPLEX_MAPPINGS is not set
550CONFIG_MTD_PHYSMAP=m 553CONFIG_MTD_PHYSMAP=m
551CONFIG_MTD_PHYSMAP_START=0x20000000 554# CONFIG_MTD_PHYSMAP_COMPAT is not set
552CONFIG_MTD_PHYSMAP_LEN=0x0
553CONFIG_MTD_PHYSMAP_BANKWIDTH=2
554# CONFIG_MTD_UCLINUX is not set 555# CONFIG_MTD_UCLINUX is not set
555# CONFIG_MTD_PLATRAM is not set 556# CONFIG_MTD_PLATRAM is not set
556 557
@@ -649,9 +650,8 @@ CONFIG_BFIN_RX_DESC_NUM=20
649# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 650# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
650# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 651# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
651# CONFIG_B44 is not set 652# CONFIG_B44 is not set
652CONFIG_NETDEV_1000=y 653# CONFIG_NETDEV_1000 is not set
653# CONFIG_AX88180 is not set 654# CONFIG_NETDEV_10000 is not set
654CONFIG_NETDEV_10000=y
655 655
656# 656#
657# Wireless LAN 657# Wireless LAN
@@ -708,14 +708,14 @@ CONFIG_SERIO_LIBPS2=y
708# Character devices 708# Character devices
709# 709#
710# CONFIG_AD9960 is not set 710# CONFIG_AD9960 is not set
711# CONFIG_SPI_ADC_BF533 is not set 711CONFIG_BFIN_DMA_INTERFACE=m
712# CONFIG_BF5xx_PPIFCD is not set 712# CONFIG_BFIN_PPI is not set
713# CONFIG_BFIN_PPIFCD is not set
713# CONFIG_BFIN_SIMPLE_TIMER is not set 714# CONFIG_BFIN_SIMPLE_TIMER is not set
714CONFIG_BF5xx_PPI=m 715# CONFIG_BFIN_SPI_ADC is not set
715CONFIG_BFIN_SPORT=m 716CONFIG_BFIN_SPORT=m
716# CONFIG_BFIN_TIMER_LATENCY is not set 717# CONFIG_BFIN_TIMER_LATENCY is not set
717# CONFIG_TWI_LCD is not set 718# CONFIG_BFIN_TWI_LCD is not set
718CONFIG_BFIN_DMA_INTERFACE=m
719CONFIG_SIMPLE_GPIO=m 719CONFIG_SIMPLE_GPIO=m
720# CONFIG_VT is not set 720# CONFIG_VT is not set
721# CONFIG_DEVKMEM is not set 721# CONFIG_DEVKMEM is not set
@@ -823,7 +823,30 @@ CONFIG_SPI_BFIN=y
823# CONFIG_SPI_SPIDEV is not set 823# CONFIG_SPI_SPIDEV is not set
824# CONFIG_SPI_TLE62X0 is not set 824# CONFIG_SPI_TLE62X0 is not set
825CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 825CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
826# CONFIG_GPIOLIB is not set 826CONFIG_GPIOLIB=y
827# CONFIG_DEBUG_GPIO is not set
828CONFIG_GPIO_SYSFS=y
829
830#
831# Memory mapped GPIO expanders:
832#
833
834#
835# I2C GPIO expanders:
836#
837# CONFIG_GPIO_MAX732X is not set
838# CONFIG_GPIO_PCA953X is not set
839# CONFIG_GPIO_PCF857X is not set
840
841#
842# PCI GPIO expanders:
843#
844
845#
846# SPI GPIO expanders:
847#
848# CONFIG_GPIO_MAX7301 is not set
849# CONFIG_GPIO_MCP23S08 is not set
827# CONFIG_W1 is not set 850# CONFIG_W1 is not set
828# CONFIG_POWER_SUPPLY is not set 851# CONFIG_POWER_SUPPLY is not set
829# CONFIG_HWMON is not set 852# CONFIG_HWMON is not set
@@ -1123,16 +1146,6 @@ CONFIG_SYSFS=y
1123# CONFIG_BEFS_FS is not set 1146# CONFIG_BEFS_FS is not set
1124# CONFIG_BFS_FS is not set 1147# CONFIG_BFS_FS is not set
1125# CONFIG_EFS_FS is not set 1148# CONFIG_EFS_FS is not set
1126CONFIG_YAFFS_FS=m
1127CONFIG_YAFFS_YAFFS1=y
1128# CONFIG_YAFFS_9BYTE_TAGS is not set
1129# CONFIG_YAFFS_DOES_ECC is not set
1130CONFIG_YAFFS_YAFFS2=y
1131CONFIG_YAFFS_AUTO_YAFFS2=y
1132# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1133# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1134# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1135CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1136CONFIG_JFFS2_FS=m 1149CONFIG_JFFS2_FS=m
1137CONFIG_JFFS2_FS_DEBUG=0 1150CONFIG_JFFS2_FS_DEBUG=0
1138CONFIG_JFFS2_FS_WRITEBUFFER=y 1151CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1144,6 +1157,16 @@ CONFIG_JFFS2_ZLIB=y
1144# CONFIG_JFFS2_LZO is not set 1157# CONFIG_JFFS2_LZO is not set
1145CONFIG_JFFS2_RTIME=y 1158CONFIG_JFFS2_RTIME=y
1146# CONFIG_JFFS2_RUBIN is not set 1159# CONFIG_JFFS2_RUBIN is not set
1160CONFIG_YAFFS_FS=m
1161CONFIG_YAFFS_YAFFS1=y
1162# CONFIG_YAFFS_9BYTE_TAGS is not set
1163# CONFIG_YAFFS_DOES_ECC is not set
1164CONFIG_YAFFS_YAFFS2=y
1165CONFIG_YAFFS_AUTO_YAFFS2=y
1166# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1167# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1168# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1169CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1147# CONFIG_CRAMFS is not set 1170# CONFIG_CRAMFS is not set
1148# CONFIG_VXFS_FS is not set 1171# CONFIG_VXFS_FS is not set
1149# CONFIG_MINIX_FS is not set 1172# CONFIG_MINIX_FS is not set
@@ -1232,7 +1255,7 @@ CONFIG_FRAME_WARN=1024
1232CONFIG_DEBUG_FS=y 1255CONFIG_DEBUG_FS=y
1233# CONFIG_HEADERS_CHECK is not set 1256# CONFIG_HEADERS_CHECK is not set
1234CONFIG_DEBUG_KERNEL=y 1257CONFIG_DEBUG_KERNEL=y
1235# CONFIG_DEBUG_SHIRQ is not set 1258CONFIG_DEBUG_SHIRQ=y
1236CONFIG_DETECT_SOFTLOCKUP=y 1259CONFIG_DETECT_SOFTLOCKUP=y
1237# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1260# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1238CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1261CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1241,8 +1264,6 @@ CONFIG_SCHED_DEBUG=y
1241# CONFIG_TIMER_STATS is not set 1264# CONFIG_TIMER_STATS is not set
1242# CONFIG_DEBUG_OBJECTS is not set 1265# CONFIG_DEBUG_OBJECTS is not set
1243# CONFIG_DEBUG_SLAB is not set 1266# CONFIG_DEBUG_SLAB is not set
1244# CONFIG_DEBUG_RT_MUTEXES is not set
1245# CONFIG_RT_MUTEX_TESTER is not set
1246# CONFIG_DEBUG_SPINLOCK is not set 1267# CONFIG_DEBUG_SPINLOCK is not set
1247# CONFIG_DEBUG_MUTEXES is not set 1268# CONFIG_DEBUG_MUTEXES is not set
1248# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1269# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1262,7 +1283,6 @@ CONFIG_DEBUG_INFO=y
1262# CONFIG_BACKTRACE_SELF_TEST is not set 1283# CONFIG_BACKTRACE_SELF_TEST is not set
1263# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1284# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1264# CONFIG_FAULT_INJECTION is not set 1285# CONFIG_FAULT_INJECTION is not set
1265# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1266 1286
1267# 1287#
1268# Tracers 1288# Tracers
@@ -1278,16 +1298,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1278# CONFIG_DEBUG_STACK_USAGE is not set 1298# CONFIG_DEBUG_STACK_USAGE is not set
1279CONFIG_DEBUG_VERBOSE=y 1299CONFIG_DEBUG_VERBOSE=y
1280CONFIG_DEBUG_MMRS=y 1300CONFIG_DEBUG_MMRS=y
1281# CONFIG_DEBUG_HWERR is not set 1301CONFIG_DEBUG_HWERR=y
1282# CONFIG_DEBUG_DOUBLEFAULT is not set 1302CONFIG_EXACT_HWERR=y
1303CONFIG_DEBUG_DOUBLEFAULT=y
1304CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1305# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1306# CONFIG_DEBUG_ICACHE_CHECK is not set
1283CONFIG_DEBUG_HUNT_FOR_ZERO=y 1307CONFIG_DEBUG_HUNT_FOR_ZERO=y
1284CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1308CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1285CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1309# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1286# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1310CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1287# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1311# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1288CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1312CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1289# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1313# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1290# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1314CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1291CONFIG_EARLY_PRINTK=y 1315CONFIG_EARLY_PRINTK=y
1292CONFIG_CPLB_INFO=y 1316CONFIG_CPLB_INFO=y
1293CONFIG_ACCESS_CHECK=y 1317CONFIG_ACCESS_CHECK=y
@@ -1400,7 +1424,6 @@ CONFIG_CRC32=y
1400# CONFIG_LIBCRC32C is not set 1424# CONFIG_LIBCRC32C is not set
1401CONFIG_ZLIB_INFLATE=y 1425CONFIG_ZLIB_INFLATE=y
1402CONFIG_ZLIB_DEFLATE=m 1426CONFIG_ZLIB_DEFLATE=m
1403CONFIG_PLIST=y
1404CONFIG_HAS_IOMEM=y 1427CONFIG_HAS_IOMEM=y
1405CONFIG_HAS_IOPORT=y 1428CONFIG_HAS_IOPORT=y
1406CONFIG_HAS_DMA=y 1429CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index 1a8e8c3adf98..ee98e227b887 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -243,7 +243,10 @@ CONFIG_HZ=250
243CONFIG_SCHED_HRTICK=y 243CONFIG_SCHED_HRTICK=y
244CONFIG_GENERIC_TIME=y 244CONFIG_GENERIC_TIME=y
245CONFIG_GENERIC_CLOCKEVENTS=y 245CONFIG_GENERIC_CLOCKEVENTS=y
246# CONFIG_TICKSOURCE_GPTMR0 is not set
247CONFIG_TICKSOURCE_CORETMR=y
246# CONFIG_CYCLES_CLOCKSOURCE is not set 248# CONFIG_CYCLES_CLOCKSOURCE is not set
249# CONFIG_GPTMR0_CLOCKSOURCE is not set
247CONFIG_TICK_ONESHOT=y 250CONFIG_TICK_ONESHOT=y
248# CONFIG_NO_HZ is not set 251# CONFIG_NO_HZ is not set
249CONFIG_HIGH_RES_TIMERS=y 252CONFIG_HIGH_RES_TIMERS=y
@@ -389,7 +392,7 @@ CONFIG_IP_PNP=y
389# CONFIG_NET_IPIP is not set 392# CONFIG_NET_IPIP is not set
390# CONFIG_NET_IPGRE is not set 393# CONFIG_NET_IPGRE is not set
391# CONFIG_ARPD is not set 394# CONFIG_ARPD is not set
392CONFIG_SYN_COOKIES=y 395# CONFIG_SYN_COOKIES is not set
393# CONFIG_INET_AH is not set 396# CONFIG_INET_AH is not set
394# CONFIG_INET_ESP is not set 397# CONFIG_INET_ESP is not set
395# CONFIG_INET_IPCOMP is not set 398# CONFIG_INET_IPCOMP is not set
@@ -546,9 +549,7 @@ CONFIG_MTD_ROM=m
546# 549#
547# CONFIG_MTD_COMPLEX_MAPPINGS is not set 550# CONFIG_MTD_COMPLEX_MAPPINGS is not set
548CONFIG_MTD_PHYSMAP=m 551CONFIG_MTD_PHYSMAP=m
549CONFIG_MTD_PHYSMAP_START=0x20000000 552# CONFIG_MTD_PHYSMAP_COMPAT is not set
550CONFIG_MTD_PHYSMAP_LEN=0x0
551CONFIG_MTD_PHYSMAP_BANKWIDTH=2
552# CONFIG_MTD_UCLINUX is not set 553# CONFIG_MTD_UCLINUX is not set
553# CONFIG_MTD_PLATRAM is not set 554# CONFIG_MTD_PLATRAM is not set
554 555
@@ -691,11 +692,11 @@ CONFIG_INPUT_EVDEV=m
691# CONFIG_INPUT_JOYSTICK is not set 692# CONFIG_INPUT_JOYSTICK is not set
692# CONFIG_INPUT_TABLET is not set 693# CONFIG_INPUT_TABLET is not set
693CONFIG_INPUT_TOUCHSCREEN=y 694CONFIG_INPUT_TOUCHSCREEN=y
694# CONFIG_TOUCHSCREEN_ADS7846 is not set
695# CONFIG_TOUCHSCREEN_AD7877 is not set 695# CONFIG_TOUCHSCREEN_AD7877 is not set
696# CONFIG_TOUCHSCREEN_AD7879_I2C is not set 696# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
697CONFIG_TOUCHSCREEN_AD7879_SPI=y 697CONFIG_TOUCHSCREEN_AD7879_SPI=y
698CONFIG_TOUCHSCREEN_AD7879=y 698CONFIG_TOUCHSCREEN_AD7879=y
699# CONFIG_TOUCHSCREEN_ADS7846 is not set
699# CONFIG_TOUCHSCREEN_FUJITSU is not set 700# CONFIG_TOUCHSCREEN_FUJITSU is not set
700# CONFIG_TOUCHSCREEN_GUNZE is not set 701# CONFIG_TOUCHSCREEN_GUNZE is not set
701# CONFIG_TOUCHSCREEN_ELO is not set 702# CONFIG_TOUCHSCREEN_ELO is not set
@@ -720,14 +721,14 @@ CONFIG_INPUT_MISC=y
720# Character devices 721# Character devices
721# 722#
722# CONFIG_AD9960 is not set 723# CONFIG_AD9960 is not set
723# CONFIG_SPI_ADC_BF533 is not set 724CONFIG_BFIN_DMA_INTERFACE=m
724# CONFIG_BF5xx_PPIFCD is not set 725# CONFIG_BFIN_PPI is not set
726# CONFIG_BFIN_PPIFCD is not set
725# CONFIG_BFIN_SIMPLE_TIMER is not set 727# CONFIG_BFIN_SIMPLE_TIMER is not set
726CONFIG_BF5xx_PPI=m 728# CONFIG_BFIN_SPI_ADC is not set
727CONFIG_BFIN_SPORT=m 729CONFIG_BFIN_SPORT=m
728# CONFIG_BFIN_TIMER_LATENCY is not set 730# CONFIG_BFIN_TIMER_LATENCY is not set
729# CONFIG_TWI_LCD is not set 731# CONFIG_BFIN_TWI_LCD is not set
730CONFIG_BFIN_DMA_INTERFACE=m
731CONFIG_SIMPLE_GPIO=m 732CONFIG_SIMPLE_GPIO=m
732# CONFIG_VT is not set 733# CONFIG_VT is not set
733# CONFIG_DEVKMEM is not set 734# CONFIG_DEVKMEM is not set
@@ -833,7 +834,30 @@ CONFIG_SPI_BFIN=y
833# CONFIG_SPI_SPIDEV is not set 834# CONFIG_SPI_SPIDEV is not set
834# CONFIG_SPI_TLE62X0 is not set 835# CONFIG_SPI_TLE62X0 is not set
835CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 836CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
836# CONFIG_GPIOLIB is not set 837CONFIG_GPIOLIB=y
838# CONFIG_DEBUG_GPIO is not set
839CONFIG_GPIO_SYSFS=y
840
841#
842# Memory mapped GPIO expanders:
843#
844
845#
846# I2C GPIO expanders:
847#
848# CONFIG_GPIO_MAX732X is not set
849# CONFIG_GPIO_PCA953X is not set
850# CONFIG_GPIO_PCF857X is not set
851
852#
853# PCI GPIO expanders:
854#
855
856#
857# SPI GPIO expanders:
858#
859# CONFIG_GPIO_MAX7301 is not set
860# CONFIG_GPIO_MCP23S08 is not set
837# CONFIG_W1 is not set 861# CONFIG_W1 is not set
838# CONFIG_POWER_SUPPLY is not set 862# CONFIG_POWER_SUPPLY is not set
839# CONFIG_HWMON is not set 863# CONFIG_HWMON is not set
@@ -1056,16 +1080,6 @@ CONFIG_SYSFS=y
1056# CONFIG_BEFS_FS is not set 1080# CONFIG_BEFS_FS is not set
1057# CONFIG_BFS_FS is not set 1081# CONFIG_BFS_FS is not set
1058# CONFIG_EFS_FS is not set 1082# CONFIG_EFS_FS is not set
1059CONFIG_YAFFS_FS=m
1060CONFIG_YAFFS_YAFFS1=y
1061# CONFIG_YAFFS_9BYTE_TAGS is not set
1062# CONFIG_YAFFS_DOES_ECC is not set
1063CONFIG_YAFFS_YAFFS2=y
1064CONFIG_YAFFS_AUTO_YAFFS2=y
1065# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1066# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1067# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1068CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1069CONFIG_JFFS2_FS=m 1083CONFIG_JFFS2_FS=m
1070CONFIG_JFFS2_FS_DEBUG=0 1084CONFIG_JFFS2_FS_DEBUG=0
1071CONFIG_JFFS2_FS_WRITEBUFFER=y 1085CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1077,6 +1091,16 @@ CONFIG_JFFS2_ZLIB=y
1077# CONFIG_JFFS2_LZO is not set 1091# CONFIG_JFFS2_LZO is not set
1078CONFIG_JFFS2_RTIME=y 1092CONFIG_JFFS2_RTIME=y
1079# CONFIG_JFFS2_RUBIN is not set 1093# CONFIG_JFFS2_RUBIN is not set
1094CONFIG_YAFFS_FS=m
1095CONFIG_YAFFS_YAFFS1=y
1096# CONFIG_YAFFS_9BYTE_TAGS is not set
1097# CONFIG_YAFFS_DOES_ECC is not set
1098CONFIG_YAFFS_YAFFS2=y
1099CONFIG_YAFFS_AUTO_YAFFS2=y
1100# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1101# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1102# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1103CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1080# CONFIG_CRAMFS is not set 1104# CONFIG_CRAMFS is not set
1081# CONFIG_VXFS_FS is not set 1105# CONFIG_VXFS_FS is not set
1082# CONFIG_MINIX_FS is not set 1106# CONFIG_MINIX_FS is not set
@@ -1165,7 +1189,7 @@ CONFIG_FRAME_WARN=1024
1165CONFIG_DEBUG_FS=y 1189CONFIG_DEBUG_FS=y
1166# CONFIG_HEADERS_CHECK is not set 1190# CONFIG_HEADERS_CHECK is not set
1167CONFIG_DEBUG_KERNEL=y 1191CONFIG_DEBUG_KERNEL=y
1168# CONFIG_DEBUG_SHIRQ is not set 1192CONFIG_DEBUG_SHIRQ=y
1169CONFIG_DETECT_SOFTLOCKUP=y 1193CONFIG_DETECT_SOFTLOCKUP=y
1170# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1194# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1171CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1195CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1174,8 +1198,6 @@ CONFIG_SCHED_DEBUG=y
1174# CONFIG_TIMER_STATS is not set 1198# CONFIG_TIMER_STATS is not set
1175# CONFIG_DEBUG_OBJECTS is not set 1199# CONFIG_DEBUG_OBJECTS is not set
1176# CONFIG_DEBUG_SLAB is not set 1200# CONFIG_DEBUG_SLAB is not set
1177# CONFIG_DEBUG_RT_MUTEXES is not set
1178# CONFIG_RT_MUTEX_TESTER is not set
1179# CONFIG_DEBUG_SPINLOCK is not set 1201# CONFIG_DEBUG_SPINLOCK is not set
1180# CONFIG_DEBUG_MUTEXES is not set 1202# CONFIG_DEBUG_MUTEXES is not set
1181# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1203# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1195,7 +1217,6 @@ CONFIG_DEBUG_INFO=y
1195# CONFIG_BACKTRACE_SELF_TEST is not set 1217# CONFIG_BACKTRACE_SELF_TEST is not set
1196# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1218# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1197# CONFIG_FAULT_INJECTION is not set 1219# CONFIG_FAULT_INJECTION is not set
1198CONFIG_SYSCTL_SYSCALL_CHECK=y
1199 1220
1200# 1221#
1201# Tracers 1222# Tracers
@@ -1211,16 +1232,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1211# CONFIG_DEBUG_STACK_USAGE is not set 1232# CONFIG_DEBUG_STACK_USAGE is not set
1212CONFIG_DEBUG_VERBOSE=y 1233CONFIG_DEBUG_VERBOSE=y
1213CONFIG_DEBUG_MMRS=y 1234CONFIG_DEBUG_MMRS=y
1214# CONFIG_DEBUG_HWERR is not set 1235CONFIG_DEBUG_HWERR=y
1215# CONFIG_DEBUG_DOUBLEFAULT is not set 1236CONFIG_EXACT_HWERR=y
1237CONFIG_DEBUG_DOUBLEFAULT=y
1238CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1239# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1240# CONFIG_DEBUG_ICACHE_CHECK is not set
1216CONFIG_DEBUG_HUNT_FOR_ZERO=y 1241CONFIG_DEBUG_HUNT_FOR_ZERO=y
1217CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1242CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1218CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1243# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1219# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1244CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1220# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1245# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1221CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1246CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1222# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1247# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1223# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1248CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1224CONFIG_EARLY_PRINTK=y 1249CONFIG_EARLY_PRINTK=y
1225CONFIG_CPLB_INFO=y 1250CONFIG_CPLB_INFO=y
1226CONFIG_ACCESS_CHECK=y 1251CONFIG_ACCESS_CHECK=y
@@ -1333,7 +1358,6 @@ CONFIG_CRC32=y
1333# CONFIG_LIBCRC32C is not set 1358# CONFIG_LIBCRC32C is not set
1334CONFIG_ZLIB_INFLATE=y 1359CONFIG_ZLIB_INFLATE=y
1335CONFIG_ZLIB_DEFLATE=m 1360CONFIG_ZLIB_DEFLATE=m
1336CONFIG_PLIST=y
1337CONFIG_HAS_IOMEM=y 1361CONFIG_HAS_IOMEM=y
1338CONFIG_HAS_IOPORT=y 1362CONFIG_HAS_IOPORT=y
1339CONFIG_HAS_DMA=y 1363CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 2cd1c2b218d7..deeabef8ab80 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -196,6 +196,7 @@ CONFIG_BFIN548_EZKIT=y
196# BF548 Specific Configuration 196# BF548 Specific Configuration
197# 197#
198# CONFIG_DEB_DMA_URGENT is not set 198# CONFIG_DEB_DMA_URGENT is not set
199# CONFIG_BF548_ATAPI_ALTERNATIVE_PORT is not set
199 200
200# 201#
201# Interrupt Priority Assignment 202# Interrupt Priority Assignment
@@ -298,7 +299,10 @@ CONFIG_HZ=250
298# CONFIG_SCHED_HRTICK is not set 299# CONFIG_SCHED_HRTICK is not set
299CONFIG_GENERIC_TIME=y 300CONFIG_GENERIC_TIME=y
300CONFIG_GENERIC_CLOCKEVENTS=y 301CONFIG_GENERIC_CLOCKEVENTS=y
302# CONFIG_TICKSOURCE_GPTMR0 is not set
303CONFIG_TICKSOURCE_CORETMR=y
301# CONFIG_CYCLES_CLOCKSOURCE is not set 304# CONFIG_CYCLES_CLOCKSOURCE is not set
305# CONFIG_GPTMR0_CLOCKSOURCE is not set
302# CONFIG_NO_HZ is not set 306# CONFIG_NO_HZ is not set
303# CONFIG_HIGH_RES_TIMERS is not set 307# CONFIG_HIGH_RES_TIMERS is not set
304CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 308CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -367,7 +371,9 @@ CONFIG_BFIN_DCACHE=y
367# CONFIG_BFIN_ICACHE_LOCK is not set 371# CONFIG_BFIN_ICACHE_LOCK is not set
368CONFIG_BFIN_WB=y 372CONFIG_BFIN_WB=y
369# CONFIG_BFIN_WT is not set 373# CONFIG_BFIN_WT is not set
370# CONFIG_BFIN_L2_CACHEABLE is not set 374# CONFIG_BFIN_L2_WB is not set
375CONFIG_BFIN_L2_WT=y
376# CONFIG_BFIN_L2_NOT_CACHED is not set
371# CONFIG_MPU is not set 377# CONFIG_MPU is not set
372 378
373# 379#
@@ -447,7 +453,7 @@ CONFIG_IP_PNP=y
447# CONFIG_NET_IPIP is not set 453# CONFIG_NET_IPIP is not set
448# CONFIG_NET_IPGRE is not set 454# CONFIG_NET_IPGRE is not set
449# CONFIG_ARPD is not set 455# CONFIG_ARPD is not set
450CONFIG_SYN_COOKIES=y 456# CONFIG_SYN_COOKIES is not set
451# CONFIG_INET_AH is not set 457# CONFIG_INET_AH is not set
452# CONFIG_INET_ESP is not set 458# CONFIG_INET_ESP is not set
453# CONFIG_INET_IPCOMP is not set 459# CONFIG_INET_IPCOMP is not set
@@ -616,9 +622,7 @@ CONFIG_MTD_RAM=y
616# 622#
617CONFIG_MTD_COMPLEX_MAPPINGS=y 623CONFIG_MTD_COMPLEX_MAPPINGS=y
618CONFIG_MTD_PHYSMAP=y 624CONFIG_MTD_PHYSMAP=y
619CONFIG_MTD_PHYSMAP_START=0x20000000 625# CONFIG_MTD_PHYSMAP_COMPAT is not set
620CONFIG_MTD_PHYSMAP_LEN=0
621CONFIG_MTD_PHYSMAP_BANKWIDTH=2
622# CONFIG_MTD_GPIO_ADDR is not set 626# CONFIG_MTD_GPIO_ADDR is not set
623# CONFIG_MTD_UCLINUX is not set 627# CONFIG_MTD_UCLINUX is not set
624# CONFIG_MTD_PLATRAM is not set 628# CONFIG_MTD_PLATRAM is not set
@@ -696,7 +700,7 @@ CONFIG_SCSI_DMA=y
696CONFIG_BLK_DEV_SD=y 700CONFIG_BLK_DEV_SD=y
697# CONFIG_CHR_DEV_ST is not set 701# CONFIG_CHR_DEV_ST is not set
698# CONFIG_CHR_DEV_OSST is not set 702# CONFIG_CHR_DEV_OSST is not set
699CONFIG_BLK_DEV_SR=y 703CONFIG_BLK_DEV_SR=m
700# CONFIG_BLK_DEV_SR_VENDOR is not set 704# CONFIG_BLK_DEV_SR_VENDOR is not set
701# CONFIG_CHR_DEV_SG is not set 705# CONFIG_CHR_DEV_SG is not set
702# CONFIG_CHR_DEV_SCH is not set 706# CONFIG_CHR_DEV_SCH is not set
@@ -718,9 +722,7 @@ CONFIG_SCSI_WAIT_SCAN=m
718# CONFIG_SCSI_ISCSI_ATTRS is not set 722# CONFIG_SCSI_ISCSI_ATTRS is not set
719# CONFIG_SCSI_SAS_LIBSAS is not set 723# CONFIG_SCSI_SAS_LIBSAS is not set
720# CONFIG_SCSI_SRP_ATTRS is not set 724# CONFIG_SCSI_SRP_ATTRS is not set
721CONFIG_SCSI_LOWLEVEL=y 725# CONFIG_SCSI_LOWLEVEL is not set
722# CONFIG_ISCSI_TCP is not set
723# CONFIG_SCSI_DEBUG is not set
724# CONFIG_SCSI_DH is not set 726# CONFIG_SCSI_DH is not set
725CONFIG_ATA=y 727CONFIG_ATA=y
726# CONFIG_ATA_NONSTANDARD is not set 728# CONFIG_ATA_NONSTANDARD is not set
@@ -752,9 +754,8 @@ CONFIG_SMSC911X=y
752# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 754# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
753# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 755# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
754# CONFIG_B44 is not set 756# CONFIG_B44 is not set
755CONFIG_NETDEV_1000=y 757# CONFIG_NETDEV_1000 is not set
756# CONFIG_AX88180 is not set 758# CONFIG_NETDEV_10000 is not set
757CONFIG_NETDEV_10000=y
758 759
759# 760#
760# Wireless LAN 761# Wireless LAN
@@ -821,11 +822,11 @@ CONFIG_KEYBOARD_BFIN=y
821# CONFIG_INPUT_JOYSTICK is not set 822# CONFIG_INPUT_JOYSTICK is not set
822# CONFIG_INPUT_TABLET is not set 823# CONFIG_INPUT_TABLET is not set
823CONFIG_INPUT_TOUCHSCREEN=y 824CONFIG_INPUT_TOUCHSCREEN=y
824# CONFIG_TOUCHSCREEN_ADS7846 is not set
825CONFIG_TOUCHSCREEN_AD7877=m 825CONFIG_TOUCHSCREEN_AD7877=m
826# CONFIG_TOUCHSCREEN_AD7879_I2C is not set 826# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
827# CONFIG_TOUCHSCREEN_AD7879_SPI is not set 827# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
828# CONFIG_TOUCHSCREEN_AD7879 is not set 828# CONFIG_TOUCHSCREEN_AD7879 is not set
829# CONFIG_TOUCHSCREEN_ADS7846 is not set
829# CONFIG_TOUCHSCREEN_FUJITSU is not set 830# CONFIG_TOUCHSCREEN_FUJITSU is not set
830# CONFIG_TOUCHSCREEN_GUNZE is not set 831# CONFIG_TOUCHSCREEN_GUNZE is not set
831# CONFIG_TOUCHSCREEN_ELO is not set 832# CONFIG_TOUCHSCREEN_ELO is not set
@@ -858,14 +859,14 @@ CONFIG_INPUT_MISC=y
858# Character devices 859# Character devices
859# 860#
860# CONFIG_AD9960 is not set 861# CONFIG_AD9960 is not set
861# CONFIG_SPI_ADC_BF533 is not set 862CONFIG_BFIN_DMA_INTERFACE=m
862# CONFIG_BF5xx_PPIFCD is not set 863# CONFIG_BFIN_PPI is not set
864# CONFIG_BFIN_PPIFCD is not set
863# CONFIG_BFIN_SIMPLE_TIMER is not set 865# CONFIG_BFIN_SIMPLE_TIMER is not set
864CONFIG_BF5xx_PPI=m 866# CONFIG_BFIN_SPI_ADC is not set
865CONFIG_BFIN_SPORT=m 867CONFIG_BFIN_SPORT=m
866# CONFIG_BFIN_TIMER_LATENCY is not set 868# CONFIG_BFIN_TIMER_LATENCY is not set
867# CONFIG_TWI_LCD is not set 869# CONFIG_BFIN_TWI_LCD is not set
868CONFIG_BFIN_DMA_INTERFACE=m
869CONFIG_SIMPLE_GPIO=m 870CONFIG_SIMPLE_GPIO=m
870CONFIG_VT=y 871CONFIG_VT=y
871CONFIG_CONSOLE_TRANSLATIONS=y 872CONFIG_CONSOLE_TRANSLATIONS=y
@@ -977,7 +978,30 @@ CONFIG_SPI_BFIN=y
977# CONFIG_SPI_SPIDEV is not set 978# CONFIG_SPI_SPIDEV is not set
978# CONFIG_SPI_TLE62X0 is not set 979# CONFIG_SPI_TLE62X0 is not set
979CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 980CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
980# CONFIG_GPIOLIB is not set 981CONFIG_GPIOLIB=y
982# CONFIG_DEBUG_GPIO is not set
983CONFIG_GPIO_SYSFS=y
984
985#
986# Memory mapped GPIO expanders:
987#
988
989#
990# I2C GPIO expanders:
991#
992# CONFIG_GPIO_MAX732X is not set
993# CONFIG_GPIO_PCA953X is not set
994# CONFIG_GPIO_PCF857X is not set
995
996#
997# PCI GPIO expanders:
998#
999
1000#
1001# SPI GPIO expanders:
1002#
1003# CONFIG_GPIO_MAX7301 is not set
1004# CONFIG_GPIO_MCP23S08 is not set
981# CONFIG_W1 is not set 1005# CONFIG_W1 is not set
982# CONFIG_POWER_SUPPLY is not set 1006# CONFIG_POWER_SUPPLY is not set
983# CONFIG_HWMON is not set 1007# CONFIG_HWMON is not set
@@ -1011,6 +1035,7 @@ CONFIG_SSB_POSSIBLE=y
1011# CONFIG_HTC_PASIC3 is not set 1035# CONFIG_HTC_PASIC3 is not set
1012# CONFIG_MFD_TMIO is not set 1036# CONFIG_MFD_TMIO is not set
1013# CONFIG_PMIC_DA903X is not set 1037# CONFIG_PMIC_DA903X is not set
1038# CONFIG_PMIC_ADP5520 is not set
1014# CONFIG_MFD_WM8400 is not set 1039# CONFIG_MFD_WM8400 is not set
1015# CONFIG_MFD_WM8350_I2C is not set 1040# CONFIG_MFD_WM8350_I2C is not set
1016# CONFIG_REGULATOR is not set 1041# CONFIG_REGULATOR is not set
@@ -1193,8 +1218,8 @@ CONFIG_USB=y
1193# 1218#
1194# Miscellaneous USB options 1219# Miscellaneous USB options
1195# 1220#
1196# CONFIG_USB_DEVICEFS is not set 1221CONFIG_USB_DEVICEFS=y
1197CONFIG_USB_DEVICE_CLASS=y 1222# CONFIG_USB_DEVICE_CLASS is not set
1198# CONFIG_USB_DYNAMIC_MINORS is not set 1223# CONFIG_USB_DYNAMIC_MINORS is not set
1199# CONFIG_USB_OTG is not set 1224# CONFIG_USB_OTG is not set
1200# CONFIG_USB_OTG_WHITELIST is not set 1225# CONFIG_USB_OTG_WHITELIST is not set
@@ -1222,10 +1247,10 @@ CONFIG_USB_MUSB_SOC=y
1222CONFIG_USB_MUSB_HOST=y 1247CONFIG_USB_MUSB_HOST=y
1223# CONFIG_USB_MUSB_PERIPHERAL is not set 1248# CONFIG_USB_MUSB_PERIPHERAL is not set
1224# CONFIG_USB_MUSB_OTG is not set 1249# CONFIG_USB_MUSB_OTG is not set
1225# CONFIG_USB_GADGET_MUSB_HDRC is not set
1226CONFIG_USB_MUSB_HDRC_HCD=y 1250CONFIG_USB_MUSB_HDRC_HCD=y
1227CONFIG_MUSB_PIO_ONLY=y 1251# CONFIG_MUSB_PIO_ONLY is not set
1228CONFIG_MUSB_DMA_POLL=y 1252CONFIG_USB_INVENTRA_DMA=y
1253# CONFIG_USB_TI_CPPI_DMA is not set
1229# CONFIG_USB_MUSB_DEBUG is not set 1254# CONFIG_USB_MUSB_DEBUG is not set
1230 1255
1231# 1256#
@@ -1243,7 +1268,7 @@ CONFIG_MUSB_DMA_POLL=y
1243# 1268#
1244# see USB_STORAGE Help for more information 1269# see USB_STORAGE Help for more information
1245# 1270#
1246CONFIG_USB_STORAGE=m 1271CONFIG_USB_STORAGE=y
1247# CONFIG_USB_STORAGE_DEBUG is not set 1272# CONFIG_USB_STORAGE_DEBUG is not set
1248# CONFIG_USB_STORAGE_DATAFAB is not set 1273# CONFIG_USB_STORAGE_DATAFAB is not set
1249# CONFIG_USB_STORAGE_FREECOM is not set 1274# CONFIG_USB_STORAGE_FREECOM is not set
@@ -1292,33 +1317,10 @@ CONFIG_USB_STORAGE=m
1292# CONFIG_USB_LD is not set 1317# CONFIG_USB_LD is not set
1293# CONFIG_USB_TRANCEVIBRATOR is not set 1318# CONFIG_USB_TRANCEVIBRATOR is not set
1294# CONFIG_USB_IOWARRIOR is not set 1319# CONFIG_USB_IOWARRIOR is not set
1320# CONFIG_USB_TEST is not set
1295# CONFIG_USB_ISIGHTFW is not set 1321# CONFIG_USB_ISIGHTFW is not set
1296# CONFIG_USB_VST is not set 1322# CONFIG_USB_VST is not set
1297# CONFIG_USB_GADGET is not set 1323# CONFIG_USB_GADGET is not set
1298# CONFIG_USB_GADGET_AT91 is not set
1299# CONFIG_USB_GADGET_ATMEL_USBA is not set
1300# CONFIG_USB_GADGET_FSL_USB2 is not set
1301# CONFIG_USB_GADGET_LH7A40X is not set
1302# CONFIG_USB_GADGET_OMAP is not set
1303# CONFIG_USB_GADGET_PXA25X is not set
1304# CONFIG_USB_GADGET_PXA27X is not set
1305# CONFIG_USB_GADGET_S3C2410 is not set
1306# CONFIG_USB_GADGET_M66592 is not set
1307# CONFIG_USB_GADGET_AMD5536UDC is not set
1308# CONFIG_USB_GADGET_FSL_QE is not set
1309# CONFIG_USB_GADGET_NET2272 is not set
1310# CONFIG_USB_GADGET_NET2280 is not set
1311# CONFIG_USB_GADGET_GOKU is not set
1312# CONFIG_USB_GADGET_DUMMY_HCD is not set
1313# CONFIG_USB_ZERO is not set
1314# CONFIG_USB_AUDIO is not set
1315# CONFIG_USB_ETH is not set
1316# CONFIG_USB_GADGETFS is not set
1317# CONFIG_USB_FILE_STORAGE is not set
1318# CONFIG_USB_G_SERIAL is not set
1319# CONFIG_USB_MIDI_GADGET is not set
1320# CONFIG_USB_G_PRINTER is not set
1321# CONFIG_USB_CDC_COMPOSITE is not set
1322CONFIG_MMC=y 1324CONFIG_MMC=y
1323# CONFIG_MMC_DEBUG is not set 1325# CONFIG_MMC_DEBUG is not set
1324# CONFIG_MMC_UNSAFE_RESUME is not set 1326# CONFIG_MMC_UNSAFE_RESUME is not set
@@ -1414,13 +1416,8 @@ CONFIG_EXT2_FS=y
1414CONFIG_EXT2_FS_XATTR=y 1416CONFIG_EXT2_FS_XATTR=y
1415# CONFIG_EXT2_FS_POSIX_ACL is not set 1417# CONFIG_EXT2_FS_POSIX_ACL is not set
1416# CONFIG_EXT2_FS_SECURITY is not set 1418# CONFIG_EXT2_FS_SECURITY is not set
1417CONFIG_EXT3_FS=y 1419# CONFIG_EXT3_FS is not set
1418CONFIG_EXT3_FS_XATTR=y
1419# CONFIG_EXT3_FS_POSIX_ACL is not set
1420# CONFIG_EXT3_FS_SECURITY is not set
1421# CONFIG_EXT4_FS is not set 1420# CONFIG_EXT4_FS is not set
1422CONFIG_JBD=y
1423# CONFIG_JBD_DEBUG is not set
1424CONFIG_FS_MBCACHE=y 1421CONFIG_FS_MBCACHE=y
1425# CONFIG_REISERFS_FS is not set 1422# CONFIG_REISERFS_FS is not set
1426# CONFIG_JFS_FS is not set 1423# CONFIG_JFS_FS is not set
@@ -1476,16 +1473,6 @@ CONFIG_SYSFS=y
1476# CONFIG_BEFS_FS is not set 1473# CONFIG_BEFS_FS is not set
1477# CONFIG_BFS_FS is not set 1474# CONFIG_BFS_FS is not set
1478# CONFIG_EFS_FS is not set 1475# CONFIG_EFS_FS is not set
1479CONFIG_YAFFS_FS=m
1480CONFIG_YAFFS_YAFFS1=y
1481# CONFIG_YAFFS_9BYTE_TAGS is not set
1482# CONFIG_YAFFS_DOES_ECC is not set
1483CONFIG_YAFFS_YAFFS2=y
1484CONFIG_YAFFS_AUTO_YAFFS2=y
1485# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1486# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1487# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1488CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1489CONFIG_JFFS2_FS=m 1476CONFIG_JFFS2_FS=m
1490CONFIG_JFFS2_FS_DEBUG=0 1477CONFIG_JFFS2_FS_DEBUG=0
1491CONFIG_JFFS2_FS_WRITEBUFFER=y 1478CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -1497,6 +1484,16 @@ CONFIG_JFFS2_ZLIB=y
1497# CONFIG_JFFS2_LZO is not set 1484# CONFIG_JFFS2_LZO is not set
1498CONFIG_JFFS2_RTIME=y 1485CONFIG_JFFS2_RTIME=y
1499# CONFIG_JFFS2_RUBIN is not set 1486# CONFIG_JFFS2_RUBIN is not set
1487CONFIG_YAFFS_FS=m
1488CONFIG_YAFFS_YAFFS1=y
1489# CONFIG_YAFFS_9BYTE_TAGS is not set
1490# CONFIG_YAFFS_DOES_ECC is not set
1491CONFIG_YAFFS_YAFFS2=y
1492CONFIG_YAFFS_AUTO_YAFFS2=y
1493# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1494# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1495# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1496CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1500# CONFIG_CRAMFS is not set 1497# CONFIG_CRAMFS is not set
1501# CONFIG_VXFS_FS is not set 1498# CONFIG_VXFS_FS is not set
1502# CONFIG_MINIX_FS is not set 1499# CONFIG_MINIX_FS is not set
@@ -1539,63 +1536,47 @@ CONFIG_CIFS=y
1539# 1536#
1540# Partition Types 1537# Partition Types
1541# 1538#
1542CONFIG_PARTITION_ADVANCED=y 1539# CONFIG_PARTITION_ADVANCED is not set
1543# CONFIG_ACORN_PARTITION is not set
1544# CONFIG_OSF_PARTITION is not set
1545# CONFIG_AMIGA_PARTITION is not set
1546# CONFIG_ATARI_PARTITION is not set
1547# CONFIG_MAC_PARTITION is not set
1548CONFIG_MSDOS_PARTITION=y 1540CONFIG_MSDOS_PARTITION=y
1549# CONFIG_BSD_DISKLABEL is not set
1550# CONFIG_MINIX_SUBPARTITION is not set
1551# CONFIG_SOLARIS_X86_PARTITION is not set
1552# CONFIG_UNIXWARE_DISKLABEL is not set
1553# CONFIG_LDM_PARTITION is not set
1554# CONFIG_SGI_PARTITION is not set
1555# CONFIG_ULTRIX_PARTITION is not set
1556# CONFIG_SUN_PARTITION is not set
1557# CONFIG_KARMA_PARTITION is not set
1558# CONFIG_EFI_PARTITION is not set
1559# CONFIG_SYSV68_PARTITION is not set
1560CONFIG_NLS=y 1541CONFIG_NLS=y
1561CONFIG_NLS_DEFAULT="iso8859-1" 1542CONFIG_NLS_DEFAULT="iso8859-1"
1562CONFIG_NLS_CODEPAGE_437=m 1543CONFIG_NLS_CODEPAGE_437=m
1563CONFIG_NLS_CODEPAGE_737=m 1544# CONFIG_NLS_CODEPAGE_737 is not set
1564CONFIG_NLS_CODEPAGE_775=m 1545# CONFIG_NLS_CODEPAGE_775 is not set
1565CONFIG_NLS_CODEPAGE_850=m 1546# CONFIG_NLS_CODEPAGE_850 is not set
1566CONFIG_NLS_CODEPAGE_852=m 1547# CONFIG_NLS_CODEPAGE_852 is not set
1567CONFIG_NLS_CODEPAGE_855=m 1548# CONFIG_NLS_CODEPAGE_855 is not set
1568CONFIG_NLS_CODEPAGE_857=m 1549# CONFIG_NLS_CODEPAGE_857 is not set
1569CONFIG_NLS_CODEPAGE_860=m 1550# CONFIG_NLS_CODEPAGE_860 is not set
1570CONFIG_NLS_CODEPAGE_861=m 1551# CONFIG_NLS_CODEPAGE_861 is not set
1571CONFIG_NLS_CODEPAGE_862=m 1552# CONFIG_NLS_CODEPAGE_862 is not set
1572CONFIG_NLS_CODEPAGE_863=m 1553# CONFIG_NLS_CODEPAGE_863 is not set
1573CONFIG_NLS_CODEPAGE_864=m 1554# CONFIG_NLS_CODEPAGE_864 is not set
1574CONFIG_NLS_CODEPAGE_865=m 1555# CONFIG_NLS_CODEPAGE_865 is not set
1575CONFIG_NLS_CODEPAGE_866=m 1556# CONFIG_NLS_CODEPAGE_866 is not set
1576CONFIG_NLS_CODEPAGE_869=m 1557# CONFIG_NLS_CODEPAGE_869 is not set
1577CONFIG_NLS_CODEPAGE_936=m 1558CONFIG_NLS_CODEPAGE_936=m
1578CONFIG_NLS_CODEPAGE_950=m 1559# CONFIG_NLS_CODEPAGE_950 is not set
1579CONFIG_NLS_CODEPAGE_932=m 1560# CONFIG_NLS_CODEPAGE_932 is not set
1580CONFIG_NLS_CODEPAGE_949=m 1561# CONFIG_NLS_CODEPAGE_949 is not set
1581CONFIG_NLS_CODEPAGE_874=m 1562# CONFIG_NLS_CODEPAGE_874 is not set
1582CONFIG_NLS_ISO8859_8=m 1563# CONFIG_NLS_ISO8859_8 is not set
1583CONFIG_NLS_CODEPAGE_1250=m 1564# CONFIG_NLS_CODEPAGE_1250 is not set
1584CONFIG_NLS_CODEPAGE_1251=m 1565# CONFIG_NLS_CODEPAGE_1251 is not set
1585CONFIG_NLS_ASCII=m 1566# CONFIG_NLS_ASCII is not set
1586CONFIG_NLS_ISO8859_1=m 1567CONFIG_NLS_ISO8859_1=m
1587CONFIG_NLS_ISO8859_2=m 1568# CONFIG_NLS_ISO8859_2 is not set
1588CONFIG_NLS_ISO8859_3=m 1569# CONFIG_NLS_ISO8859_3 is not set
1589CONFIG_NLS_ISO8859_4=m 1570# CONFIG_NLS_ISO8859_4 is not set
1590CONFIG_NLS_ISO8859_5=m 1571# CONFIG_NLS_ISO8859_5 is not set
1591CONFIG_NLS_ISO8859_6=m 1572# CONFIG_NLS_ISO8859_6 is not set
1592CONFIG_NLS_ISO8859_7=m 1573# CONFIG_NLS_ISO8859_7 is not set
1593CONFIG_NLS_ISO8859_9=m 1574# CONFIG_NLS_ISO8859_9 is not set
1594CONFIG_NLS_ISO8859_13=m 1575# CONFIG_NLS_ISO8859_13 is not set
1595CONFIG_NLS_ISO8859_14=m 1576# CONFIG_NLS_ISO8859_14 is not set
1596CONFIG_NLS_ISO8859_15=m 1577# CONFIG_NLS_ISO8859_15 is not set
1597CONFIG_NLS_KOI8_R=m 1578# CONFIG_NLS_KOI8_R is not set
1598CONFIG_NLS_KOI8_U=m 1579# CONFIG_NLS_KOI8_U is not set
1599CONFIG_NLS_UTF8=m 1580CONFIG_NLS_UTF8=m
1600# CONFIG_DLM is not set 1581# CONFIG_DLM is not set
1601 1582
@@ -1611,7 +1592,7 @@ CONFIG_FRAME_WARN=1024
1611CONFIG_DEBUG_FS=y 1592CONFIG_DEBUG_FS=y
1612# CONFIG_HEADERS_CHECK is not set 1593# CONFIG_HEADERS_CHECK is not set
1613CONFIG_DEBUG_KERNEL=y 1594CONFIG_DEBUG_KERNEL=y
1614# CONFIG_DEBUG_SHIRQ is not set 1595CONFIG_DEBUG_SHIRQ=y
1615CONFIG_DETECT_SOFTLOCKUP=y 1596CONFIG_DETECT_SOFTLOCKUP=y
1616# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1597# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1617CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1598CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1620,8 +1601,6 @@ CONFIG_SCHED_DEBUG=y
1620# CONFIG_TIMER_STATS is not set 1601# CONFIG_TIMER_STATS is not set
1621# CONFIG_DEBUG_OBJECTS is not set 1602# CONFIG_DEBUG_OBJECTS is not set
1622# CONFIG_DEBUG_SLAB is not set 1603# CONFIG_DEBUG_SLAB is not set
1623# CONFIG_DEBUG_RT_MUTEXES is not set
1624# CONFIG_RT_MUTEX_TESTER is not set
1625# CONFIG_DEBUG_SPINLOCK is not set 1604# CONFIG_DEBUG_SPINLOCK is not set
1626# CONFIG_DEBUG_MUTEXES is not set 1605# CONFIG_DEBUG_MUTEXES is not set
1627# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1606# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1641,7 +1620,6 @@ CONFIG_DEBUG_INFO=y
1641# CONFIG_BACKTRACE_SELF_TEST is not set 1620# CONFIG_BACKTRACE_SELF_TEST is not set
1642# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1621# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1643# CONFIG_FAULT_INJECTION is not set 1622# CONFIG_FAULT_INJECTION is not set
1644# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1645 1623
1646# 1624#
1647# Tracers 1625# Tracers
@@ -1657,16 +1635,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1657# CONFIG_DEBUG_STACK_USAGE is not set 1635# CONFIG_DEBUG_STACK_USAGE is not set
1658CONFIG_DEBUG_VERBOSE=y 1636CONFIG_DEBUG_VERBOSE=y
1659CONFIG_DEBUG_MMRS=y 1637CONFIG_DEBUG_MMRS=y
1660# CONFIG_DEBUG_HWERR is not set 1638CONFIG_DEBUG_HWERR=y
1661# CONFIG_DEBUG_DOUBLEFAULT is not set 1639CONFIG_EXACT_HWERR=y
1640CONFIG_DEBUG_DOUBLEFAULT=y
1641CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1642# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1643# CONFIG_DEBUG_ICACHE_CHECK is not set
1662CONFIG_DEBUG_HUNT_FOR_ZERO=y 1644CONFIG_DEBUG_HUNT_FOR_ZERO=y
1663CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1645CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1664CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1646# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1665# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1647CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1666# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1648# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1667CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1649CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1668# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1650# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1669# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1651CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1670CONFIG_EARLY_PRINTK=y 1652CONFIG_EARLY_PRINTK=y
1671CONFIG_CPLB_INFO=y 1653CONFIG_CPLB_INFO=y
1672CONFIG_ACCESS_CHECK=y 1654CONFIG_ACCESS_CHECK=y
@@ -1780,7 +1762,6 @@ CONFIG_CRC32=y
1780# CONFIG_LIBCRC32C is not set 1762# CONFIG_LIBCRC32C is not set
1781CONFIG_ZLIB_INFLATE=y 1763CONFIG_ZLIB_INFLATE=y
1782CONFIG_ZLIB_DEFLATE=m 1764CONFIG_ZLIB_DEFLATE=m
1783CONFIG_PLIST=y
1784CONFIG_HAS_IOMEM=y 1765CONFIG_HAS_IOMEM=y
1785CONFIG_HAS_IOPORT=y 1766CONFIG_HAS_IOPORT=y
1786CONFIG_HAS_DMA=y 1767CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 4a6ea8e31df7..dcfbe2e2931e 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# Thu May 21 05:50:01 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -42,10 +43,11 @@ CONFIG_LOG_BUF_SHIFT=14
42CONFIG_BLK_DEV_INITRD=y 43CONFIG_BLK_DEV_INITRD=y
43CONFIG_INITRAMFS_SOURCE="" 44CONFIG_INITRAMFS_SOURCE=""
44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
45# CONFIG_SYSCTL is not set 46CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y
46CONFIG_EMBEDDED=y 48CONFIG_EMBEDDED=y
47CONFIG_UID16=y 49CONFIG_UID16=y
48CONFIG_SYSCTL_SYSCALL=y 50# CONFIG_SYSCTL_SYSCALL is not set
49CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
50# CONFIG_KALLSYMS_ALL is not set 52# CONFIG_KALLSYMS_ALL is not set
51# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -53,16 +55,15 @@ CONFIG_HOTPLUG=y
53CONFIG_PRINTK=y 55CONFIG_PRINTK=y
54CONFIG_BUG=y 56CONFIG_BUG=y
55# CONFIG_ELF_CORE is not set 57# CONFIG_ELF_CORE is not set
56CONFIG_COMPAT_BRK=y
57CONFIG_BASE_FULL=y 58CONFIG_BASE_FULL=y
58# CONFIG_FUTEX is not set 59# CONFIG_FUTEX is not set
59CONFIG_ANON_INODES=y
60CONFIG_EPOLL=y 60CONFIG_EPOLL=y
61CONFIG_SIGNALFD=y 61# CONFIG_SIGNALFD is not set
62CONFIG_TIMERFD=y 62# CONFIG_TIMERFD is not set
63CONFIG_EVENTFD=y 63# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 64# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 65CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y
66CONFIG_SLAB=y 67CONFIG_SLAB=y
67# CONFIG_SLUB is not set 68# CONFIG_SLUB is not set
68# CONFIG_SLOB is not set 69# CONFIG_SLOB is not set
@@ -71,7 +72,6 @@ CONFIG_SLAB=y
71CONFIG_HAVE_OPROFILE=y 72CONFIG_HAVE_OPROFILE=y
72# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 73# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
73CONFIG_SLABINFO=y 74CONFIG_SLABINFO=y
74CONFIG_RT_MUTEXES=y
75CONFIG_TINY_SHMEM=y 75CONFIG_TINY_SHMEM=y
76CONFIG_BASE_SMALL=0 76CONFIG_BASE_SMALL=0
77CONFIG_MODULES=y 77CONFIG_MODULES=y
@@ -148,9 +148,9 @@ CONFIG_BF_REV_MAX=5
148# CONFIG_BF_REV_0_0 is not set 148# CONFIG_BF_REV_0_0 is not set
149# CONFIG_BF_REV_0_1 is not set 149# CONFIG_BF_REV_0_1 is not set
150# CONFIG_BF_REV_0_2 is not set 150# CONFIG_BF_REV_0_2 is not set
151CONFIG_BF_REV_0_3=y 151# CONFIG_BF_REV_0_3 is not set
152# CONFIG_BF_REV_0_4 is not set 152# CONFIG_BF_REV_0_4 is not set
153# CONFIG_BF_REV_0_5 is not set 153CONFIG_BF_REV_0_5=y
154# CONFIG_BF_REV_0_6 is not set 154# CONFIG_BF_REV_0_6 is not set
155# CONFIG_BF_REV_ANY is not set 155# CONFIG_BF_REV_ANY is not set
156# CONFIG_BF_REV_NONE is not set 156# CONFIG_BF_REV_NONE is not set
@@ -179,7 +179,6 @@ CONFIG_BFIN561_EZKIT=y
179# Core B Support 179# Core B Support
180# 180#
181CONFIG_BF561_COREB=y 181CONFIG_BF561_COREB=y
182CONFIG_BF561_COREB_RESET=y
183 182
184# 183#
185# Interrupt Priority Assignment 184# Interrupt Priority Assignment
@@ -264,7 +263,10 @@ CONFIG_HZ=250
264CONFIG_SCHED_HRTICK=y 263CONFIG_SCHED_HRTICK=y
265CONFIG_GENERIC_TIME=y 264CONFIG_GENERIC_TIME=y
266CONFIG_GENERIC_CLOCKEVENTS=y 265CONFIG_GENERIC_CLOCKEVENTS=y
266# CONFIG_TICKSOURCE_GPTMR0 is not set
267CONFIG_TICKSOURCE_CORETMR=y
267# CONFIG_CYCLES_CLOCKSOURCE is not set 268# CONFIG_CYCLES_CLOCKSOURCE is not set
269# CONFIG_GPTMR0_CLOCKSOURCE is not set
268CONFIG_TICK_ONESHOT=y 270CONFIG_TICK_ONESHOT=y
269# CONFIG_NO_HZ is not set 271# CONFIG_NO_HZ is not set
270CONFIG_HIGH_RES_TIMERS=y 272CONFIG_HIGH_RES_TIMERS=y
@@ -334,7 +336,9 @@ CONFIG_BFIN_DCACHE=y
334# CONFIG_BFIN_ICACHE_LOCK is not set 336# CONFIG_BFIN_ICACHE_LOCK is not set
335CONFIG_BFIN_WB=y 337CONFIG_BFIN_WB=y
336# CONFIG_BFIN_WT is not set 338# CONFIG_BFIN_WT is not set
337# CONFIG_BFIN_L2_CACHEABLE is not set 339# CONFIG_BFIN_L2_WB is not set
340CONFIG_BFIN_L2_WT=y
341# CONFIG_BFIN_L2_NOT_CACHED is not set
338# CONFIG_MPU is not set 342# CONFIG_MPU is not set
339 343
340# 344#
@@ -415,7 +419,7 @@ CONFIG_IP_PNP=y
415# CONFIG_NET_IPIP is not set 419# CONFIG_NET_IPIP is not set
416# CONFIG_NET_IPGRE is not set 420# CONFIG_NET_IPGRE is not set
417# CONFIG_ARPD is not set 421# CONFIG_ARPD is not set
418CONFIG_SYN_COOKIES=y 422# CONFIG_SYN_COOKIES is not set
419# CONFIG_INET_AH is not set 423# CONFIG_INET_AH is not set
420# CONFIG_INET_ESP is not set 424# CONFIG_INET_ESP is not set
421# CONFIG_INET_IPCOMP is not set 425# CONFIG_INET_IPCOMP is not set
@@ -570,9 +574,7 @@ CONFIG_MTD_ROM=m
570# 574#
571# CONFIG_MTD_COMPLEX_MAPPINGS is not set 575# CONFIG_MTD_COMPLEX_MAPPINGS is not set
572CONFIG_MTD_PHYSMAP=m 576CONFIG_MTD_PHYSMAP=m
573CONFIG_MTD_PHYSMAP_START=0x20000000 577# CONFIG_MTD_PHYSMAP_COMPAT is not set
574CONFIG_MTD_PHYSMAP_LEN=0x0
575CONFIG_MTD_PHYSMAP_BANKWIDTH=2
576# CONFIG_MTD_UCLINUX is not set 578# CONFIG_MTD_UCLINUX is not set
577# CONFIG_MTD_PLATRAM is not set 579# CONFIG_MTD_PLATRAM is not set
578 580
@@ -649,9 +651,8 @@ CONFIG_SMC91X=y
649# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 651# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
650# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 652# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
651# CONFIG_B44 is not set 653# CONFIG_B44 is not set
652CONFIG_NETDEV_1000=y 654# CONFIG_NETDEV_1000 is not set
653# CONFIG_AX88180 is not set 655# CONFIG_NETDEV_10000 is not set
654CONFIG_NETDEV_10000=y
655 656
656# 657#
657# Wireless LAN 658# Wireless LAN
@@ -703,13 +704,13 @@ CONFIG_INPUT_EVDEV=m
703# Character devices 704# Character devices
704# 705#
705# CONFIG_AD9960 is not set 706# CONFIG_AD9960 is not set
706# CONFIG_SPI_ADC_BF533 is not set 707CONFIG_BFIN_DMA_INTERFACE=m
707# CONFIG_BF5xx_PPIFCD is not set 708# CONFIG_BFIN_PPI is not set
709# CONFIG_BFIN_PPIFCD is not set
708# CONFIG_BFIN_SIMPLE_TIMER is not set 710# CONFIG_BFIN_SIMPLE_TIMER is not set
709# CONFIG_BF5xx_PPI is not set 711# CONFIG_BFIN_SPI_ADC is not set
710# CONFIG_BFIN_SPORT is not set 712# CONFIG_BFIN_SPORT is not set
711# CONFIG_BFIN_TIMER_LATENCY is not set 713# CONFIG_BFIN_TIMER_LATENCY is not set
712CONFIG_BFIN_DMA_INTERFACE=m
713CONFIG_SIMPLE_GPIO=m 714CONFIG_SIMPLE_GPIO=m
714# CONFIG_VT is not set 715# CONFIG_VT is not set
715# CONFIG_DEVKMEM is not set 716# CONFIG_DEVKMEM is not set
@@ -765,7 +766,30 @@ CONFIG_SPI_BFIN=y
765# CONFIG_SPI_SPIDEV is not set 766# CONFIG_SPI_SPIDEV is not set
766# CONFIG_SPI_TLE62X0 is not set 767# CONFIG_SPI_TLE62X0 is not set
767CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 768CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
768# CONFIG_GPIOLIB is not set 769CONFIG_GPIOLIB=y
770# CONFIG_DEBUG_GPIO is not set
771CONFIG_GPIO_SYSFS=y
772
773#
774# Memory mapped GPIO expanders:
775#
776
777#
778# I2C GPIO expanders:
779#
780# CONFIG_GPIO_MAX732X is not set
781# CONFIG_GPIO_PCA953X is not set
782# CONFIG_GPIO_PCF857X is not set
783
784#
785# PCI GPIO expanders:
786#
787
788#
789# SPI GPIO expanders:
790#
791# CONFIG_GPIO_MAX7301 is not set
792# CONFIG_GPIO_MCP23S08 is not set
769# CONFIG_W1 is not set 793# CONFIG_W1 is not set
770# CONFIG_POWER_SUPPLY is not set 794# CONFIG_POWER_SUPPLY is not set
771# CONFIG_HWMON is not set 795# CONFIG_HWMON is not set
@@ -897,16 +921,6 @@ CONFIG_SYSFS=y
897# CONFIG_BEFS_FS is not set 921# CONFIG_BEFS_FS is not set
898# CONFIG_BFS_FS is not set 922# CONFIG_BFS_FS is not set
899# CONFIG_EFS_FS is not set 923# CONFIG_EFS_FS is not set
900CONFIG_YAFFS_FS=m
901CONFIG_YAFFS_YAFFS1=y
902# CONFIG_YAFFS_9BYTE_TAGS is not set
903# CONFIG_YAFFS_DOES_ECC is not set
904CONFIG_YAFFS_YAFFS2=y
905CONFIG_YAFFS_AUTO_YAFFS2=y
906# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
907# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
908# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
909CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
910CONFIG_JFFS2_FS=m 924CONFIG_JFFS2_FS=m
911CONFIG_JFFS2_FS_DEBUG=0 925CONFIG_JFFS2_FS_DEBUG=0
912CONFIG_JFFS2_FS_WRITEBUFFER=y 926CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -918,6 +932,16 @@ CONFIG_JFFS2_ZLIB=y
918# CONFIG_JFFS2_LZO is not set 932# CONFIG_JFFS2_LZO is not set
919CONFIG_JFFS2_RTIME=y 933CONFIG_JFFS2_RTIME=y
920# CONFIG_JFFS2_RUBIN is not set 934# CONFIG_JFFS2_RUBIN is not set
935CONFIG_YAFFS_FS=m
936CONFIG_YAFFS_YAFFS1=y
937# CONFIG_YAFFS_9BYTE_TAGS is not set
938# CONFIG_YAFFS_DOES_ECC is not set
939CONFIG_YAFFS_YAFFS2=y
940CONFIG_YAFFS_AUTO_YAFFS2=y
941# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
942# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
943# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
944CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
921# CONFIG_CRAMFS is not set 945# CONFIG_CRAMFS is not set
922# CONFIG_VXFS_FS is not set 946# CONFIG_VXFS_FS is not set
923# CONFIG_MINIX_FS is not set 947# CONFIG_MINIX_FS is not set
@@ -1006,7 +1030,7 @@ CONFIG_FRAME_WARN=1024
1006CONFIG_DEBUG_FS=y 1030CONFIG_DEBUG_FS=y
1007# CONFIG_HEADERS_CHECK is not set 1031# CONFIG_HEADERS_CHECK is not set
1008CONFIG_DEBUG_KERNEL=y 1032CONFIG_DEBUG_KERNEL=y
1009# CONFIG_DEBUG_SHIRQ is not set 1033CONFIG_DEBUG_SHIRQ=y
1010CONFIG_DETECT_SOFTLOCKUP=y 1034CONFIG_DETECT_SOFTLOCKUP=y
1011# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1035# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1012CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1036CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1015,8 +1039,6 @@ CONFIG_SCHED_DEBUG=y
1015# CONFIG_TIMER_STATS is not set 1039# CONFIG_TIMER_STATS is not set
1016# CONFIG_DEBUG_OBJECTS is not set 1040# CONFIG_DEBUG_OBJECTS is not set
1017# CONFIG_DEBUG_SLAB is not set 1041# CONFIG_DEBUG_SLAB is not set
1018# CONFIG_DEBUG_RT_MUTEXES is not set
1019# CONFIG_RT_MUTEX_TESTER is not set
1020# CONFIG_DEBUG_SPINLOCK is not set 1042# CONFIG_DEBUG_SPINLOCK is not set
1021# CONFIG_DEBUG_MUTEXES is not set 1043# CONFIG_DEBUG_MUTEXES is not set
1022# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1044# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
@@ -1036,7 +1058,6 @@ CONFIG_DEBUG_INFO=y
1036# CONFIG_BACKTRACE_SELF_TEST is not set 1058# CONFIG_BACKTRACE_SELF_TEST is not set
1037# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1059# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1038# CONFIG_FAULT_INJECTION is not set 1060# CONFIG_FAULT_INJECTION is not set
1039# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1040 1061
1041# 1062#
1042# Tracers 1063# Tracers
@@ -1052,16 +1073,20 @@ CONFIG_HAVE_ARCH_KGDB=y
1052# CONFIG_DEBUG_STACK_USAGE is not set 1073# CONFIG_DEBUG_STACK_USAGE is not set
1053CONFIG_DEBUG_VERBOSE=y 1074CONFIG_DEBUG_VERBOSE=y
1054CONFIG_DEBUG_MMRS=y 1075CONFIG_DEBUG_MMRS=y
1055# CONFIG_DEBUG_HWERR is not set 1076CONFIG_DEBUG_HWERR=y
1056# CONFIG_DEBUG_DOUBLEFAULT is not set 1077CONFIG_EXACT_HWERR=y
1078CONFIG_DEBUG_DOUBLEFAULT=y
1079CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1080# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1081# CONFIG_DEBUG_ICACHE_CHECK is not set
1057CONFIG_DEBUG_HUNT_FOR_ZERO=y 1082CONFIG_DEBUG_HUNT_FOR_ZERO=y
1058CONFIG_DEBUG_BFIN_HWTRACE_ON=y 1083CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1059CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 1084# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1060# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set 1085CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1061# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set 1086# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1062CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 1087CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1063# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set 1088# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1064# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set 1089CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1065CONFIG_EARLY_PRINTK=y 1090CONFIG_EARLY_PRINTK=y
1066CONFIG_CPLB_INFO=y 1091CONFIG_CPLB_INFO=y
1067CONFIG_ACCESS_CHECK=y 1092CONFIG_ACCESS_CHECK=y
@@ -1174,7 +1199,6 @@ CONFIG_CRC32=y
1174# CONFIG_LIBCRC32C is not set 1199# CONFIG_LIBCRC32C is not set
1175CONFIG_ZLIB_INFLATE=y 1200CONFIG_ZLIB_INFLATE=y
1176CONFIG_ZLIB_DEFLATE=m 1201CONFIG_ZLIB_DEFLATE=m
1177CONFIG_PLIST=y
1178CONFIG_HAS_IOMEM=y 1202CONFIG_HAS_IOMEM=y
1179CONFIG_HAS_IOPORT=y 1203CONFIG_HAS_IOPORT=y
1180CONFIG_HAS_DMA=y 1204CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index ef1a2c84ace1..174c578b8ec4 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -46,7 +46,7 @@ CONFIG_INITRAMFS_SOURCE=""
46# CONFIG_SYSCTL is not set 46# CONFIG_SYSCTL is not set
47CONFIG_EMBEDDED=y 47CONFIG_EMBEDDED=y
48CONFIG_UID16=y 48CONFIG_UID16=y
49CONFIG_SYSCTL_SYSCALL=y 49# CONFIG_SYSCTL_SYSCALL is not set
50CONFIG_SYSCTL_SYSCALL_CHECK=y 50CONFIG_SYSCTL_SYSCALL_CHECK=y
51CONFIG_KALLSYMS=y 51CONFIG_KALLSYMS=y
52# CONFIG_KALLSYMS_EXTRA_PASS is not set 52# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -381,7 +381,7 @@ CONFIG_IP_PNP=y
381# CONFIG_NET_IPIP is not set 381# CONFIG_NET_IPIP is not set
382# CONFIG_NET_IPGRE is not set 382# CONFIG_NET_IPGRE is not set
383# CONFIG_ARPD is not set 383# CONFIG_ARPD is not set
384CONFIG_SYN_COOKIES=y 384# CONFIG_SYN_COOKIES is not set
385# CONFIG_INET_AH is not set 385# CONFIG_INET_AH is not set
386# CONFIG_INET_ESP is not set 386# CONFIG_INET_ESP is not set
387# CONFIG_INET_IPCOMP is not set 387# CONFIG_INET_IPCOMP is not set
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index e2fc588e4336..e17875e8abe8 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -46,7 +46,7 @@ CONFIG_INITRAMFS_SOURCE=""
46# CONFIG_SYSCTL is not set 46# CONFIG_SYSCTL is not set
47CONFIG_EMBEDDED=y 47CONFIG_EMBEDDED=y
48CONFIG_UID16=y 48CONFIG_UID16=y
49CONFIG_SYSCTL_SYSCALL=y 49# CONFIG_SYSCTL_SYSCALL is not set
50CONFIG_KALLSYMS=y 50CONFIG_KALLSYMS=y
51# CONFIG_KALLSYMS_ALL is not set 51# CONFIG_KALLSYMS_ALL is not set
52# CONFIG_KALLSYMS_EXTRA_PASS is not set 52# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -411,7 +411,7 @@ CONFIG_IP_PNP=y
411# CONFIG_NET_IPIP is not set 411# CONFIG_NET_IPIP is not set
412# CONFIG_NET_IPGRE is not set 412# CONFIG_NET_IPGRE is not set
413# CONFIG_ARPD is not set 413# CONFIG_ARPD is not set
414CONFIG_SYN_COOKIES=y 414# CONFIG_SYN_COOKIES is not set
415# CONFIG_INET_AH is not set 415# CONFIG_INET_AH is not set
416# CONFIG_INET_ESP is not set 416# CONFIG_INET_ESP is not set
417# CONFIG_INET_IPCOMP is not set 417# CONFIG_INET_IPCOMP is not set
@@ -783,7 +783,30 @@ CONFIG_SPI_BFIN=y
783# CONFIG_SPI_SPIDEV is not set 783# CONFIG_SPI_SPIDEV is not set
784# CONFIG_SPI_TLE62X0 is not set 784# CONFIG_SPI_TLE62X0 is not set
785CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 785CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
786# CONFIG_GPIOLIB is not set 786CONFIG_GPIOLIB=y
787# CONFIG_DEBUG_GPIO is not set
788CONFIG_GPIO_SYSFS=y
789
790#
791# Memory mapped GPIO expanders:
792#
793
794#
795# I2C GPIO expanders:
796#
797# CONFIG_GPIO_MAX732X is not set
798# CONFIG_GPIO_PCA953X is not set
799# CONFIG_GPIO_PCF857X is not set
800
801#
802# PCI GPIO expanders:
803#
804
805#
806# SPI GPIO expanders:
807#
808# CONFIG_GPIO_MAX7301 is not set
809# CONFIG_GPIO_MCP23S08 is not set
787# CONFIG_W1 is not set 810# CONFIG_W1 is not set
788# CONFIG_POWER_SUPPLY is not set 811# CONFIG_POWER_SUPPLY is not set
789CONFIG_HWMON=y 812CONFIG_HWMON=y
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 65a8bbb8d647..fafd95e84b28 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -49,7 +49,7 @@ CONFIG_LOG_BUF_SHIFT=14
49# CONFIG_SYSCTL is not set 49# CONFIG_SYSCTL is not set
50CONFIG_EMBEDDED=y 50CONFIG_EMBEDDED=y
51# CONFIG_UID16 is not set 51# CONFIG_UID16 is not set
52CONFIG_SYSCTL_SYSCALL=y 52# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 53CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 54# CONFIG_KALLSYMS_EXTRA_PASS is not set
55# CONFIG_HOTPLUG is not set 55# CONFIG_HOTPLUG is not set
@@ -347,7 +347,7 @@ CONFIG_IP_FIB_HASH=y
347# CONFIG_NET_IPIP is not set 347# CONFIG_NET_IPIP is not set
348# CONFIG_NET_IPGRE is not set 348# CONFIG_NET_IPGRE is not set
349# CONFIG_ARPD is not set 349# CONFIG_ARPD is not set
350CONFIG_SYN_COOKIES=y 350# CONFIG_SYN_COOKIES is not set
351# CONFIG_INET_AH is not set 351# CONFIG_INET_AH is not set
352# CONFIG_INET_ESP is not set 352# CONFIG_INET_ESP is not set
353# CONFIG_INET_IPCOMP is not set 353# CONFIG_INET_IPCOMP is not set
@@ -547,9 +547,9 @@ CONFIG_MII=y
547CONFIG_SMC91X=y 547CONFIG_SMC91X=y
548# CONFIG_SMSC911X is not set 548# CONFIG_SMSC911X is not set
549# CONFIG_DM9000 is not set 549# CONFIG_DM9000 is not set
550CONFIG_NETDEV_1000=y 550# CONFIG_NETDEV_1000 is not set
551# CONFIG_AX88180 is not set 551# CONFIG_AX88180 is not set
552CONFIG_NETDEV_10000=y 552# CONFIG_NETDEV_10000 is not set
553 553
554# 554#
555# Wireless LAN 555# Wireless LAN
@@ -641,6 +641,10 @@ CONFIG_UNIX98_PTYS=y
641# CONFIG_TCG_TPM is not set 641# CONFIG_TCG_TPM is not set
642# CONFIG_I2C is not set 642# CONFIG_I2C is not set
643 643
644CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
645CONFIG_GPIOLIB=y
646CONFIG_GPIO_SYSFS=y
647
644# 648#
645# SPI support 649# SPI support
646# 650#
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index 9b7e9d781145..e73aa5af58b9 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -1,6 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22.16 3# Linux kernel version: 2.6.28.10
4# Wed Jun 3 06:27:41 2009
4# 5#
5# CONFIG_MMU is not set 6# CONFIG_MMU is not set
6# CONFIG_FPU is not set 7# CONFIG_FPU is not set
@@ -8,48 +9,44 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
9CONFIG_BLACKFIN=y 10CONFIG_BLACKFIN=y
10CONFIG_ZONE_DMA=y 11CONFIG_ZONE_DMA=y
11CONFIG_SEMAPHORE_SLEEPERS=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 12CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y 13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_TIME=y
17CONFIG_GENERIC_GPIO=y 16CONFIG_GENERIC_GPIO=y
18CONFIG_FORCE_MAX_ZONEORDER=14 17CONFIG_FORCE_MAX_ZONEORDER=14
19CONFIG_GENERIC_CALIBRATE_DELAY=y 18CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
21 20
22# 21#
23# Code maturity level options 22# General setup
24# 23#
25CONFIG_EXPERIMENTAL=y 24CONFIG_EXPERIMENTAL=y
26CONFIG_BROKEN_ON_SMP=y 25CONFIG_BROKEN_ON_SMP=y
27CONFIG_INIT_ENV_ARG_LIMIT=32 26CONFIG_INIT_ENV_ARG_LIMIT=32
28
29#
30# General setup
31#
32CONFIG_LOCALVERSION="" 27CONFIG_LOCALVERSION=""
33CONFIG_LOCALVERSION_AUTO=y 28CONFIG_LOCALVERSION_AUTO=y
34CONFIG_SYSVIPC=y 29CONFIG_SYSVIPC=y
35# CONFIG_IPC_NS is not set
36CONFIG_SYSVIPC_SYSCTL=y 30CONFIG_SYSVIPC_SYSCTL=y
37# CONFIG_POSIX_MQUEUE is not set 31# CONFIG_POSIX_MQUEUE is not set
38# CONFIG_BSD_PROCESS_ACCT is not set 32# CONFIG_BSD_PROCESS_ACCT is not set
39# CONFIG_TASKSTATS is not set 33# CONFIG_TASKSTATS is not set
40# CONFIG_UTS_NS is not set
41# CONFIG_AUDIT is not set 34# CONFIG_AUDIT is not set
42CONFIG_IKCONFIG=y 35CONFIG_IKCONFIG=y
43CONFIG_IKCONFIG_PROC=y 36CONFIG_IKCONFIG_PROC=y
44CONFIG_LOG_BUF_SHIFT=14 37CONFIG_LOG_BUF_SHIFT=14
45# CONFIG_SYSFS_DEPRECATED is not set 38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set
46# CONFIG_RELAY is not set 41# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set
47# CONFIG_BLK_DEV_INITRD is not set 43# CONFIG_BLK_DEV_INITRD is not set
48# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
49# CONFIG_SYSCTL is not set 45CONFIG_SYSCTL=y
46CONFIG_ANON_INODES=y
50CONFIG_EMBEDDED=y 47CONFIG_EMBEDDED=y
51# CONFIG_UID16 is not set 48# CONFIG_UID16 is not set
52CONFIG_SYSCTL_SYSCALL=y 49# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 50CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 51# CONFIG_KALLSYMS_EXTRA_PASS is not set
55# CONFIG_HOTPLUG is not set 52# CONFIG_HOTPLUG is not set
@@ -58,37 +55,36 @@ CONFIG_BUG=y
58# CONFIG_ELF_CORE is not set 55# CONFIG_ELF_CORE is not set
59CONFIG_BASE_FULL=y 56CONFIG_BASE_FULL=y
60# CONFIG_FUTEX is not set 57# CONFIG_FUTEX is not set
61CONFIG_ANON_INODES=y
62CONFIG_EPOLL=y 58CONFIG_EPOLL=y
63CONFIG_SIGNALFD=y 59# CONFIG_SIGNALFD is not set
64CONFIG_EVENTFD=y 60# CONFIG_TIMERFD is not set
61# CONFIG_EVENTFD is not set
62# CONFIG_AIO is not set
65CONFIG_VM_EVENT_COUNTERS=y 63CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3 64CONFIG_COMPAT_BRK=y
67# CONFIG_NP2 is not set
68CONFIG_SLAB=y 65CONFIG_SLAB=y
69# CONFIG_SLUB is not set 66# CONFIG_SLUB is not set
70# CONFIG_SLOB is not set 67# CONFIG_SLOB is not set
71CONFIG_RT_MUTEXES=y 68# CONFIG_PROFILING is not set
69# CONFIG_MARKERS is not set
70CONFIG_HAVE_OPROFILE=y
71# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
72CONFIG_SLABINFO=y
72CONFIG_TINY_SHMEM=y 73CONFIG_TINY_SHMEM=y
73CONFIG_BASE_SMALL=0 74CONFIG_BASE_SMALL=0
74
75#
76# Loadable module support
77#
78CONFIG_MODULES=y 75CONFIG_MODULES=y
76# CONFIG_MODULE_FORCE_LOAD is not set
79CONFIG_MODULE_UNLOAD=y 77CONFIG_MODULE_UNLOAD=y
80# CONFIG_MODULE_FORCE_UNLOAD is not set 78# CONFIG_MODULE_FORCE_UNLOAD is not set
81# CONFIG_MODVERSIONS is not set 79# CONFIG_MODVERSIONS is not set
82# CONFIG_MODULE_SRCVERSION_ALL is not set 80# CONFIG_MODULE_SRCVERSION_ALL is not set
83CONFIG_KMOD=y 81CONFIG_KMOD=y
84
85#
86# Block layer
87#
88CONFIG_BLOCK=y 82CONFIG_BLOCK=y
89# CONFIG_LBD is not set 83# CONFIG_LBD is not set
90# CONFIG_BLK_DEV_IO_TRACE is not set 84# CONFIG_BLK_DEV_IO_TRACE is not set
91# CONFIG_LSF is not set 85# CONFIG_LSF is not set
86# CONFIG_BLK_DEV_BSG is not set
87# CONFIG_BLK_DEV_INTEGRITY is not set
92 88
93# 89#
94# IO Schedulers 90# IO Schedulers
@@ -102,9 +98,11 @@ CONFIG_IOSCHED_CFQ=y
102# CONFIG_DEFAULT_CFQ is not set 98# CONFIG_DEFAULT_CFQ is not set
103CONFIG_DEFAULT_NOOP=y 99CONFIG_DEFAULT_NOOP=y
104CONFIG_DEFAULT_IOSCHED="noop" 100CONFIG_DEFAULT_IOSCHED="noop"
101CONFIG_CLASSIC_RCU=y
105CONFIG_PREEMPT_NONE=y 102CONFIG_PREEMPT_NONE=y
106# CONFIG_PREEMPT_VOLUNTARY is not set 103# CONFIG_PREEMPT_VOLUNTARY is not set
107# CONFIG_PREEMPT is not set 104# CONFIG_PREEMPT is not set
105# CONFIG_FREEZER is not set
108 106
109# 107#
110# Blackfin Processor Options 108# Blackfin Processor Options
@@ -113,6 +111,10 @@ CONFIG_PREEMPT_NONE=y
113# 111#
114# Processor and Board Settings 112# Processor and Board Settings
115# 113#
114# CONFIG_BF512 is not set
115# CONFIG_BF514 is not set
116# CONFIG_BF516 is not set
117# CONFIG_BF518 is not set
116# CONFIG_BF522 is not set 118# CONFIG_BF522 is not set
117# CONFIG_BF523 is not set 119# CONFIG_BF523 is not set
118# CONFIG_BF524 is not set 120# CONFIG_BF524 is not set
@@ -125,22 +127,31 @@ CONFIG_PREEMPT_NONE=y
125# CONFIG_BF534 is not set 127# CONFIG_BF534 is not set
126# CONFIG_BF536 is not set 128# CONFIG_BF536 is not set
127CONFIG_BF537=y 129CONFIG_BF537=y
130# CONFIG_BF538 is not set
131# CONFIG_BF539 is not set
128# CONFIG_BF542 is not set 132# CONFIG_BF542 is not set
133# CONFIG_BF542M is not set
129# CONFIG_BF544 is not set 134# CONFIG_BF544 is not set
135# CONFIG_BF544M is not set
130# CONFIG_BF547 is not set 136# CONFIG_BF547 is not set
137# CONFIG_BF547M is not set
131# CONFIG_BF548 is not set 138# CONFIG_BF548 is not set
139# CONFIG_BF548M is not set
132# CONFIG_BF549 is not set 140# CONFIG_BF549 is not set
141# CONFIG_BF549M is not set
133# CONFIG_BF561 is not set 142# CONFIG_BF561 is not set
143CONFIG_BF_REV_MIN=2
144CONFIG_BF_REV_MAX=3
134# CONFIG_BF_REV_0_0 is not set 145# CONFIG_BF_REV_0_0 is not set
135# CONFIG_BF_REV_0_1 is not set 146# CONFIG_BF_REV_0_1 is not set
136CONFIG_BF_REV_0_2=y 147CONFIG_BF_REV_0_2=y
137# CONFIG_BF_REV_0_3 is not set 148# CONFIG_BF_REV_0_3 is not set
138# CONFIG_BF_REV_0_4 is not set 149# CONFIG_BF_REV_0_4 is not set
139# CONFIG_BF_REV_0_5 is not set 150# CONFIG_BF_REV_0_5 is not set
151# CONFIG_BF_REV_0_6 is not set
140# CONFIG_BF_REV_ANY is not set 152# CONFIG_BF_REV_ANY is not set
141# CONFIG_BF_REV_NONE is not set 153# CONFIG_BF_REV_NONE is not set
142CONFIG_BF53x=y 154CONFIG_BF53x=y
143CONFIG_BFIN_SINGLE_CORE=y
144CONFIG_MEM_MT48LC16M16A2TG_75=y 155CONFIG_MEM_MT48LC16M16A2TG_75=y
145CONFIG_IRQ_PLL_WAKEUP=7 156CONFIG_IRQ_PLL_WAKEUP=7
146CONFIG_IRQ_RTC=8 157CONFIG_IRQ_RTC=8
@@ -150,7 +161,6 @@ CONFIG_IRQ_SPORT0_TX=9
150CONFIG_IRQ_SPORT1_RX=9 161CONFIG_IRQ_SPORT1_RX=9
151CONFIG_IRQ_SPORT1_TX=9 162CONFIG_IRQ_SPORT1_TX=9
152CONFIG_IRQ_TWI=10 163CONFIG_IRQ_TWI=10
153CONFIG_IRQ_SPI=10
154CONFIG_IRQ_UART0_RX=10 164CONFIG_IRQ_UART0_RX=10
155CONFIG_IRQ_UART0_TX=10 165CONFIG_IRQ_UART0_TX=10
156CONFIG_IRQ_UART1_RX=10 166CONFIG_IRQ_UART1_RX=10
@@ -169,11 +179,12 @@ CONFIG_IRQ_PORTG_INTB=12
169CONFIG_IRQ_MEM_DMA0=13 179CONFIG_IRQ_MEM_DMA0=13
170CONFIG_IRQ_MEM_DMA1=13 180CONFIG_IRQ_MEM_DMA1=13
171CONFIG_IRQ_WATCH=13 181CONFIG_IRQ_WATCH=13
182CONFIG_IRQ_SPI=10
172# CONFIG_BFIN537_STAMP is not set 183# CONFIG_BFIN537_STAMP is not set
173CONFIG_BFIN537_BLUETECHNIX_CM=y 184CONFIG_BFIN537_BLUETECHNIX_CM=y
185# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
174# CONFIG_PNAV10 is not set 186# CONFIG_PNAV10 is not set
175# CONFIG_CAMSIG_MINOTAUR is not set 187# CONFIG_CAMSIG_MINOTAUR is not set
176# CONFIG_GENERIC_BF537_BOARD is not set
177 188
178# 189#
179# BF537 Specific Configuration 190# BF537 Specific Configuration
@@ -196,6 +207,7 @@ CONFIG_IRQ_PROG_INTA=12
196# Board customizations 207# Board customizations
197# 208#
198# CONFIG_CMDLINE_BOOL is not set 209# CONFIG_CMDLINE_BOOL is not set
210CONFIG_BOOT_LOAD=0x1000
199 211
200# 212#
201# Clock/PLL Setup 213# Clock/PLL Setup
@@ -215,13 +227,20 @@ CONFIG_HZ_250=y
215# CONFIG_HZ_300 is not set 227# CONFIG_HZ_300 is not set
216# CONFIG_HZ_1000 is not set 228# CONFIG_HZ_1000 is not set
217CONFIG_HZ=250 229CONFIG_HZ=250
230# CONFIG_SCHED_HRTICK is not set
231CONFIG_GENERIC_TIME=y
232CONFIG_GENERIC_CLOCKEVENTS=y
233# CONFIG_TICKSOURCE_GPTMR0 is not set
234CONFIG_TICKSOURCE_CORETMR=y
235# CONFIG_CYCLES_CLOCKSOURCE is not set
236# CONFIG_GPTMR0_CLOCKSOURCE is not set
237# CONFIG_NO_HZ is not set
238# CONFIG_HIGH_RES_TIMERS is not set
239CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
218 240
219# 241#
220# Memory Setup 242# Misc
221# 243#
222CONFIG_MAX_MEM_SIZE=32
223CONFIG_MEM_ADD_WIDTH=9
224CONFIG_BOOT_LOAD=0x1000
225CONFIG_BFIN_SCRATCH_REG_RETN=y 244CONFIG_BFIN_SCRATCH_REG_RETN=y
226# CONFIG_BFIN_SCRATCH_REG_RETE is not set 245# CONFIG_BFIN_SCRATCH_REG_RETE is not set
227# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set 246# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -248,6 +267,12 @@ CONFIG_IP_CHECKSUM_L1=y
248CONFIG_CACHELINE_ALIGNED_L1=y 267CONFIG_CACHELINE_ALIGNED_L1=y
249CONFIG_SYSCALL_TAB_L1=y 268CONFIG_SYSCALL_TAB_L1=y
250CONFIG_CPLB_SWITCH_TAB_L1=y 269CONFIG_CPLB_SWITCH_TAB_L1=y
270CONFIG_APP_STACK_L1=y
271
272#
273# Speed Optimizations
274#
275CONFIG_BFIN_INS_LOWOVERHEAD=y
251CONFIG_RAMKERNEL=y 276CONFIG_RAMKERNEL=y
252# CONFIG_ROMKERNEL is not set 277# CONFIG_ROMKERNEL is not set
253CONFIG_SELECT_MEMORY_MODEL=y 278CONFIG_SELECT_MEMORY_MODEL=y
@@ -256,12 +281,14 @@ CONFIG_FLATMEM_MANUAL=y
256# CONFIG_SPARSEMEM_MANUAL is not set 281# CONFIG_SPARSEMEM_MANUAL is not set
257CONFIG_FLATMEM=y 282CONFIG_FLATMEM=y
258CONFIG_FLAT_NODE_MEM_MAP=y 283CONFIG_FLAT_NODE_MEM_MAP=y
259# CONFIG_SPARSEMEM_STATIC is not set 284CONFIG_PAGEFLAGS_EXTENDED=y
260CONFIG_SPLIT_PTLOCK_CPUS=4 285CONFIG_SPLIT_PTLOCK_CPUS=4
261# CONFIG_RESOURCES_64BIT is not set 286# CONFIG_RESOURCES_64BIT is not set
287# CONFIG_PHYS_ADDR_T_64BIT is not set
262CONFIG_ZONE_DMA_FLAG=1 288CONFIG_ZONE_DMA_FLAG=1
263CONFIG_LARGE_ALLOCS=y 289CONFIG_VIRT_TO_BUS=y
264# CONFIG_BFIN_GPTIMERS is not set 290# CONFIG_BFIN_GPTIMERS is not set
291# CONFIG_DMA_UNCACHED_4M is not set
265# CONFIG_DMA_UNCACHED_2M is not set 292# CONFIG_DMA_UNCACHED_2M is not set
266CONFIG_DMA_UNCACHED_1M=y 293CONFIG_DMA_UNCACHED_1M=y
267# CONFIG_DMA_UNCACHED_NONE is not set 294# CONFIG_DMA_UNCACHED_NONE is not set
@@ -275,7 +302,6 @@ CONFIG_BFIN_DCACHE=y
275# CONFIG_BFIN_ICACHE_LOCK is not set 302# CONFIG_BFIN_ICACHE_LOCK is not set
276CONFIG_BFIN_WB=y 303CONFIG_BFIN_WB=y
277# CONFIG_BFIN_WT is not set 304# CONFIG_BFIN_WT is not set
278CONFIG_L1_MAX_PIECE=16
279# CONFIG_MPU is not set 305# CONFIG_MPU is not set
280 306
281# 307#
@@ -304,36 +330,28 @@ CONFIG_BANK_3=0xFFC2
304# 330#
305# Bus options (PCI, PCMCIA, EISA, MCA, ISA) 331# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
306# 332#
307# CONFIG_PCI is not set
308# CONFIG_ARCH_SUPPORTS_MSI is not set 333# CONFIG_ARCH_SUPPORTS_MSI is not set
309 334
310# 335#
311# PCCARD (PCMCIA/CardBus) support
312#
313
314#
315# Executable file formats 336# Executable file formats
316# 337#
317CONFIG_BINFMT_ELF_FDPIC=y 338CONFIG_BINFMT_ELF_FDPIC=y
318CONFIG_BINFMT_FLAT=y 339CONFIG_BINFMT_FLAT=y
319CONFIG_BINFMT_ZFLAT=y 340CONFIG_BINFMT_ZFLAT=y
320CONFIG_BINFMT_SHARED_FLAT=y 341CONFIG_BINFMT_SHARED_FLAT=y
342# CONFIG_HAVE_AOUT is not set
321# CONFIG_BINFMT_MISC is not set 343# CONFIG_BINFMT_MISC is not set
322 344
323# 345#
324# Power management options 346# Power management options
325# 347#
326# CONFIG_PM is not set 348# CONFIG_PM is not set
327# CONFIG_PM_WAKEUP_BY_GPIO is not set 349CONFIG_ARCH_SUSPEND_POSSIBLE=y
328 350
329# 351#
330# CPU Frequency scaling 352# CPU Frequency scaling
331# 353#
332# CONFIG_CPU_FREQ is not set 354# CONFIG_CPU_FREQ is not set
333
334#
335# Networking
336#
337CONFIG_NET=y 355CONFIG_NET=y
338 356
339# 357#
@@ -346,6 +364,7 @@ CONFIG_XFRM=y
346# CONFIG_XFRM_USER is not set 364# CONFIG_XFRM_USER is not set
347# CONFIG_XFRM_SUB_POLICY is not set 365# CONFIG_XFRM_SUB_POLICY is not set
348# CONFIG_XFRM_MIGRATE is not set 366# CONFIG_XFRM_MIGRATE is not set
367# CONFIG_XFRM_STATISTICS is not set
349# CONFIG_NET_KEY is not set 368# CONFIG_NET_KEY is not set
350CONFIG_INET=y 369CONFIG_INET=y
351# CONFIG_IP_MULTICAST is not set 370# CONFIG_IP_MULTICAST is not set
@@ -358,7 +377,7 @@ CONFIG_IP_PNP=y
358# CONFIG_NET_IPIP is not set 377# CONFIG_NET_IPIP is not set
359# CONFIG_NET_IPGRE is not set 378# CONFIG_NET_IPGRE is not set
360# CONFIG_ARPD is not set 379# CONFIG_ARPD is not set
361CONFIG_SYN_COOKIES=y 380# CONFIG_SYN_COOKIES is not set
362# CONFIG_INET_AH is not set 381# CONFIG_INET_AH is not set
363# CONFIG_INET_ESP is not set 382# CONFIG_INET_ESP is not set
364# CONFIG_INET_IPCOMP is not set 383# CONFIG_INET_IPCOMP is not set
@@ -367,6 +386,7 @@ CONFIG_SYN_COOKIES=y
367CONFIG_INET_XFRM_MODE_TRANSPORT=y 386CONFIG_INET_XFRM_MODE_TRANSPORT=y
368CONFIG_INET_XFRM_MODE_TUNNEL=y 387CONFIG_INET_XFRM_MODE_TUNNEL=y
369CONFIG_INET_XFRM_MODE_BEET=y 388CONFIG_INET_XFRM_MODE_BEET=y
389# CONFIG_INET_LRO is not set
370CONFIG_INET_DIAG=y 390CONFIG_INET_DIAG=y
371CONFIG_INET_TCP_DIAG=y 391CONFIG_INET_TCP_DIAG=y
372# CONFIG_TCP_CONG_ADVANCED is not set 392# CONFIG_TCP_CONG_ADVANCED is not set
@@ -374,8 +394,6 @@ CONFIG_TCP_CONG_CUBIC=y
374CONFIG_DEFAULT_TCP_CONG="cubic" 394CONFIG_DEFAULT_TCP_CONG="cubic"
375# CONFIG_TCP_MD5SIG is not set 395# CONFIG_TCP_MD5SIG is not set
376# CONFIG_IPV6 is not set 396# CONFIG_IPV6 is not set
377# CONFIG_INET6_XFRM_TUNNEL is not set
378# CONFIG_INET6_TUNNEL is not set
379# CONFIG_NETLABEL is not set 397# CONFIG_NETLABEL is not set
380# CONFIG_NETWORK_SECMARK is not set 398# CONFIG_NETWORK_SECMARK is not set
381# CONFIG_NETFILTER is not set 399# CONFIG_NETFILTER is not set
@@ -384,6 +402,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
384# CONFIG_TIPC is not set 402# CONFIG_TIPC is not set
385# CONFIG_ATM is not set 403# CONFIG_ATM is not set
386# CONFIG_BRIDGE is not set 404# CONFIG_BRIDGE is not set
405# CONFIG_NET_DSA is not set
387# CONFIG_VLAN_8021Q is not set 406# CONFIG_VLAN_8021Q is not set
388# CONFIG_DECNET is not set 407# CONFIG_DECNET is not set
389# CONFIG_LLC2 is not set 408# CONFIG_LLC2 is not set
@@ -393,10 +412,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
393# CONFIG_LAPB is not set 412# CONFIG_LAPB is not set
394# CONFIG_ECONET is not set 413# CONFIG_ECONET is not set
395# CONFIG_WAN_ROUTER is not set 414# CONFIG_WAN_ROUTER is not set
396
397#
398# QoS and/or fair queueing
399#
400# CONFIG_NET_SCHED is not set 415# CONFIG_NET_SCHED is not set
401 416
402# 417#
@@ -404,18 +419,14 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
404# 419#
405# CONFIG_NET_PKTGEN is not set 420# CONFIG_NET_PKTGEN is not set
406# CONFIG_HAMRADIO is not set 421# CONFIG_HAMRADIO is not set
422# CONFIG_CAN is not set
407# CONFIG_IRDA is not set 423# CONFIG_IRDA is not set
408# CONFIG_BT is not set 424# CONFIG_BT is not set
409# CONFIG_AF_RXRPC is not set 425# CONFIG_AF_RXRPC is not set
410 426# CONFIG_PHONET is not set
411# 427# CONFIG_WIRELESS is not set
412# Wireless
413#
414# CONFIG_CFG80211 is not set
415# CONFIG_WIRELESS_EXT is not set
416# CONFIG_MAC80211 is not set
417# CONFIG_IEEE80211 is not set
418# CONFIG_RFKILL is not set 428# CONFIG_RFKILL is not set
429# CONFIG_NET_9P is not set
419 430
420# 431#
421# Device Drivers 432# Device Drivers
@@ -427,10 +438,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
427CONFIG_STANDALONE=y 438CONFIG_STANDALONE=y
428CONFIG_PREVENT_FIRMWARE_BUILD=y 439CONFIG_PREVENT_FIRMWARE_BUILD=y
429# CONFIG_SYS_HYPERVISOR is not set 440# CONFIG_SYS_HYPERVISOR is not set
430
431#
432# Connector - unified userspace <-> kernelspace linker
433#
434# CONFIG_CONNECTOR is not set 441# CONFIG_CONNECTOR is not set
435CONFIG_MTD=y 442CONFIG_MTD=y
436# CONFIG_MTD_DEBUG is not set 443# CONFIG_MTD_DEBUG is not set
@@ -438,6 +445,7 @@ CONFIG_MTD=y
438CONFIG_MTD_PARTITIONS=y 445CONFIG_MTD_PARTITIONS=y
439# CONFIG_MTD_REDBOOT_PARTS is not set 446# CONFIG_MTD_REDBOOT_PARTS is not set
440# CONFIG_MTD_CMDLINE_PARTS is not set 447# CONFIG_MTD_CMDLINE_PARTS is not set
448# CONFIG_MTD_AR7_PARTS is not set
441 449
442# 450#
443# User Modules And Translation Layers 451# User Modules And Translation Layers
@@ -450,12 +458,15 @@ CONFIG_MTD_BLOCK=y
450# CONFIG_INFTL is not set 458# CONFIG_INFTL is not set
451# CONFIG_RFD_FTL is not set 459# CONFIG_RFD_FTL is not set
452# CONFIG_SSFDC is not set 460# CONFIG_SSFDC is not set
461# CONFIG_MTD_OOPS is not set
453 462
454# 463#
455# RAM/ROM/Flash chip drivers 464# RAM/ROM/Flash chip drivers
456# 465#
457# CONFIG_MTD_CFI is not set 466CONFIG_MTD_CFI=y
458# CONFIG_MTD_JEDECPROBE is not set 467# CONFIG_MTD_JEDECPROBE is not set
468CONFIG_MTD_GEN_PROBE=y
469# CONFIG_MTD_CFI_ADV_OPTIONS is not set
459CONFIG_MTD_MAP_BANK_WIDTH_1=y 470CONFIG_MTD_MAP_BANK_WIDTH_1=y
460CONFIG_MTD_MAP_BANK_WIDTH_2=y 471CONFIG_MTD_MAP_BANK_WIDTH_2=y
461CONFIG_MTD_MAP_BANK_WIDTH_4=y 472CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -466,6 +477,10 @@ CONFIG_MTD_CFI_I1=y
466CONFIG_MTD_CFI_I2=y 477CONFIG_MTD_CFI_I2=y
467# CONFIG_MTD_CFI_I4 is not set 478# CONFIG_MTD_CFI_I4 is not set
468# CONFIG_MTD_CFI_I8 is not set 479# CONFIG_MTD_CFI_I8 is not set
480CONFIG_MTD_CFI_INTELEXT=y
481# CONFIG_MTD_CFI_AMDSTD is not set
482# CONFIG_MTD_CFI_STAA is not set
483CONFIG_MTD_CFI_UTIL=y
469CONFIG_MTD_RAM=y 484CONFIG_MTD_RAM=y
470# CONFIG_MTD_ROM is not set 485# CONFIG_MTD_ROM is not set
471# CONFIG_MTD_ABSENT is not set 486# CONFIG_MTD_ABSENT is not set
@@ -473,7 +488,8 @@ CONFIG_MTD_RAM=y
473# 488#
474# Mapping drivers for chip access 489# Mapping drivers for chip access
475# 490#
476# CONFIG_MTD_COMPLEX_MAPPINGS is not set 491CONFIG_MTD_COMPLEX_MAPPINGS=y
492CONFIG_MTD_GPIO_ADDR=y
477CONFIG_MTD_UCLINUX=y 493CONFIG_MTD_UCLINUX=y
478# CONFIG_MTD_PLATRAM is not set 494# CONFIG_MTD_PLATRAM is not set
479 495
@@ -498,33 +514,23 @@ CONFIG_MTD_UCLINUX=y
498# UBI - Unsorted block images 514# UBI - Unsorted block images
499# 515#
500# CONFIG_MTD_UBI is not set 516# CONFIG_MTD_UBI is not set
501
502#
503# Parallel port support
504#
505# CONFIG_PARPORT is not set 517# CONFIG_PARPORT is not set
506 518CONFIG_BLK_DEV=y
507#
508# Plug and Play support
509#
510# CONFIG_PNPACPI is not set
511
512#
513# Block devices
514#
515# CONFIG_BLK_DEV_COW_COMMON is not set 519# CONFIG_BLK_DEV_COW_COMMON is not set
516# CONFIG_BLK_DEV_LOOP is not set 520# CONFIG_BLK_DEV_LOOP is not set
517# CONFIG_BLK_DEV_NBD is not set 521# CONFIG_BLK_DEV_NBD is not set
518CONFIG_BLK_DEV_RAM=y 522CONFIG_BLK_DEV_RAM=y
519CONFIG_BLK_DEV_RAM_COUNT=16 523CONFIG_BLK_DEV_RAM_COUNT=16
520CONFIG_BLK_DEV_RAM_SIZE=4096 524CONFIG_BLK_DEV_RAM_SIZE=4096
521CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 525# CONFIG_BLK_DEV_XIP is not set
522# CONFIG_CDROM_PKTCDVD is not set 526# CONFIG_CDROM_PKTCDVD is not set
523# CONFIG_ATA_OVER_ETH is not set 527# CONFIG_ATA_OVER_ETH is not set
524 528# CONFIG_BLK_DEV_HD is not set
525# 529CONFIG_MISC_DEVICES=y
526# Misc devices 530# CONFIG_EEPROM_93CX6 is not set
527# 531# CONFIG_ENCLOSURE_SERVICES is not set
532# CONFIG_C2PORT is not set
533CONFIG_HAVE_IDE=y
528# CONFIG_IDE is not set 534# CONFIG_IDE is not set
529 535
530# 536#
@@ -532,22 +538,17 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
532# 538#
533# CONFIG_RAID_ATTRS is not set 539# CONFIG_RAID_ATTRS is not set
534# CONFIG_SCSI is not set 540# CONFIG_SCSI is not set
541# CONFIG_SCSI_DMA is not set
535# CONFIG_SCSI_NETLINK is not set 542# CONFIG_SCSI_NETLINK is not set
536# CONFIG_ATA is not set 543# CONFIG_ATA is not set
537
538#
539# Multi-device support (RAID and LVM)
540#
541# CONFIG_MD is not set 544# CONFIG_MD is not set
542
543#
544# Network device support
545#
546CONFIG_NETDEVICES=y 545CONFIG_NETDEVICES=y
547# CONFIG_DUMMY is not set 546# CONFIG_DUMMY is not set
548# CONFIG_BONDING is not set 547# CONFIG_BONDING is not set
548# CONFIG_MACVLAN is not set
549# CONFIG_EQUALIZER is not set 549# CONFIG_EQUALIZER is not set
550# CONFIG_TUN is not set 550# CONFIG_TUN is not set
551# CONFIG_VETH is not set
551CONFIG_PHYLIB=y 552CONFIG_PHYLIB=y
552 553
553# 554#
@@ -561,46 +562,44 @@ CONFIG_PHYLIB=y
561# CONFIG_VITESSE_PHY is not set 562# CONFIG_VITESSE_PHY is not set
562# CONFIG_SMSC_PHY is not set 563# CONFIG_SMSC_PHY is not set
563# CONFIG_BROADCOM_PHY is not set 564# CONFIG_BROADCOM_PHY is not set
565# CONFIG_ICPLUS_PHY is not set
566# CONFIG_REALTEK_PHY is not set
564# CONFIG_FIXED_PHY is not set 567# CONFIG_FIXED_PHY is not set
565 568# CONFIG_MDIO_BITBANG is not set
566#
567# Ethernet (10 or 100Mbit)
568#
569CONFIG_NET_ETHERNET=y 569CONFIG_NET_ETHERNET=y
570CONFIG_MII=y 570CONFIG_MII=y
571# CONFIG_SMC91X is not set
572CONFIG_BFIN_MAC=y 571CONFIG_BFIN_MAC=y
573CONFIG_BFIN_MAC_USE_L1=y 572CONFIG_BFIN_MAC_USE_L1=y
574CONFIG_BFIN_TX_DESC_NUM=10 573CONFIG_BFIN_TX_DESC_NUM=10
575CONFIG_BFIN_RX_DESC_NUM=20 574CONFIG_BFIN_RX_DESC_NUM=20
576# CONFIG_BFIN_MAC_RMII is not set 575# CONFIG_BFIN_MAC_RMII is not set
576# CONFIG_SMC91X is not set
577# CONFIG_SMSC911X is not set 577# CONFIG_SMSC911X is not set
578# CONFIG_DM9000 is not set 578# CONFIG_DM9000 is not set
579CONFIG_NETDEV_1000=y 579# CONFIG_IBM_NEW_EMAC_ZMII is not set
580# CONFIG_AX88180 is not set 580# CONFIG_IBM_NEW_EMAC_RGMII is not set
581CONFIG_NETDEV_10000=y 581# CONFIG_IBM_NEW_EMAC_TAH is not set
582# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
583# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
584# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
585# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
586# CONFIG_B44 is not set
587# CONFIG_NETDEV_1000 is not set
588# CONFIG_NETDEV_10000 is not set
582 589
583# 590#
584# Wireless LAN 591# Wireless LAN
585# 592#
586# CONFIG_WLAN_PRE80211 is not set 593# CONFIG_WLAN_PRE80211 is not set
587# CONFIG_WLAN_80211 is not set 594# CONFIG_WLAN_80211 is not set
595# CONFIG_IWLWIFI_LEDS is not set
588# CONFIG_WAN is not set 596# CONFIG_WAN is not set
589# CONFIG_PPP is not set 597# CONFIG_PPP is not set
590# CONFIG_SLIP is not set 598# CONFIG_SLIP is not set
591# CONFIG_SHAPER is not set
592# CONFIG_NETCONSOLE is not set 599# CONFIG_NETCONSOLE is not set
593# CONFIG_NETPOLL is not set 600# CONFIG_NETPOLL is not set
594# CONFIG_NET_POLL_CONTROLLER is not set 601# CONFIG_NET_POLL_CONTROLLER is not set
595
596#
597# ISDN subsystem
598#
599# CONFIG_ISDN is not set 602# CONFIG_ISDN is not set
600
601#
602# Telephony Support
603#
604# CONFIG_PHONE is not set 603# CONFIG_PHONE is not set
605 604
606# 605#
@@ -618,15 +617,17 @@ CONFIG_NETDEV_10000=y
618# Character devices 617# Character devices
619# 618#
620# CONFIG_AD9960 is not set 619# CONFIG_AD9960 is not set
621# CONFIG_SPI_ADC_BF533 is not set 620CONFIG_BFIN_DMA_INTERFACE=m
622# CONFIG_BF5xx_PFLAGS is not set 621# CONFIG_BFIN_PPI is not set
623# CONFIG_BF5xx_PPIFCD is not set 622# CONFIG_BFIN_PPIFCD is not set
624# CONFIG_BFIN_SIMPLE_TIMER is not set 623# CONFIG_BFIN_SIMPLE_TIMER is not set
625# CONFIG_BF5xx_PPI is not set 624# CONFIG_BFIN_SPI_ADC is not set
626CONFIG_BFIN_SPORT=y 625CONFIG_BFIN_SPORT=y
627# CONFIG_BFIN_TIMER_LATENCY is not set 626# CONFIG_BFIN_TIMER_LATENCY is not set
627# CONFIG_SIMPLE_GPIO is not set
628# CONFIG_VT is not set 628# CONFIG_VT is not set
629# CONFIG_DEVKMEM is not set 629# CONFIG_DEVKMEM is not set
630# CONFIG_BFIN_JTAG_COMM is not set
630# CONFIG_SERIAL_NONSTANDARD is not set 631# CONFIG_SERIAL_NONSTANDARD is not set
631 632
632# 633#
@@ -655,138 +656,119 @@ CONFIG_UNIX98_PTYS=y
655# CAN, the car bus and industrial fieldbus 656# CAN, the car bus and industrial fieldbus
656# 657#
657# CONFIG_CAN4LINUX is not set 658# CONFIG_CAN4LINUX is not set
658
659#
660# IPMI
661#
662# CONFIG_IPMI_HANDLER is not set 659# CONFIG_IPMI_HANDLER is not set
663# CONFIG_WATCHDOG is not set
664# CONFIG_HW_RANDOM is not set 660# CONFIG_HW_RANDOM is not set
665# CONFIG_GEN_RTC is not set
666# CONFIG_R3964 is not set 661# CONFIG_R3964 is not set
667# CONFIG_RAW_DRIVER is not set 662# CONFIG_RAW_DRIVER is not set
663# CONFIG_TCG_TPM is not set
664# CONFIG_I2C is not set
665# CONFIG_SPI is not set
666CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
667CONFIG_GPIOLIB=y
668CONFIG_GPIO_SYSFS=y
668 669
669# 670#
670# TPM devices 671# Memory mapped GPIO expanders:
671# 672#
672# CONFIG_TCG_TPM is not set
673# CONFIG_I2C is not set
674 673
675# 674#
676# SPI support 675# I2C GPIO expanders:
676#
677
678#
679# PCI GPIO expanders:
677# 680#
678# CONFIG_SPI is not set
679# CONFIG_SPI_MASTER is not set
680 681
681# 682#
682# Dallas's 1-wire bus 683# SPI GPIO expanders:
683# 684#
684# CONFIG_W1 is not set 685# CONFIG_W1 is not set
686# CONFIG_POWER_SUPPLY is not set
685CONFIG_HWMON=y 687CONFIG_HWMON=y
686# CONFIG_HWMON_VID is not set 688# CONFIG_HWMON_VID is not set
687# CONFIG_SENSORS_ABITUGURU is not set
688# CONFIG_SENSORS_F71805F is not set 689# CONFIG_SENSORS_F71805F is not set
690# CONFIG_SENSORS_F71882FG is not set
691# CONFIG_SENSORS_IT87 is not set
692# CONFIG_SENSORS_PC87360 is not set
689# CONFIG_SENSORS_PC87427 is not set 693# CONFIG_SENSORS_PC87427 is not set
690# CONFIG_SENSORS_SMSC47M1 is not set 694# CONFIG_SENSORS_SMSC47M1 is not set
691# CONFIG_SENSORS_SMSC47B397 is not set 695# CONFIG_SENSORS_SMSC47B397 is not set
692# CONFIG_SENSORS_VT1211 is not set 696# CONFIG_SENSORS_VT1211 is not set
693# CONFIG_SENSORS_W83627HF is not set 697# CONFIG_SENSORS_W83627HF is not set
698# CONFIG_SENSORS_W83627EHF is not set
694# CONFIG_HWMON_DEBUG_CHIP is not set 699# CONFIG_HWMON_DEBUG_CHIP is not set
700# CONFIG_THERMAL is not set
701# CONFIG_THERMAL_HWMON is not set
702# CONFIG_WATCHDOG is not set
703CONFIG_SSB_POSSIBLE=y
704
705#
706# Sonics Silicon Backplane
707#
708# CONFIG_SSB is not set
695 709
696# 710#
697# Multifunction device drivers 711# Multifunction device drivers
698# 712#
713# CONFIG_MFD_CORE is not set
699# CONFIG_MFD_SM501 is not set 714# CONFIG_MFD_SM501 is not set
715# CONFIG_HTC_PASIC3 is not set
716# CONFIG_MFD_TMIO is not set
717# CONFIG_REGULATOR is not set
700 718
701# 719#
702# Multimedia devices 720# Multimedia devices
703# 721#
722
723#
724# Multimedia core support
725#
704# CONFIG_VIDEO_DEV is not set 726# CONFIG_VIDEO_DEV is not set
705# CONFIG_DVB_CORE is not set 727# CONFIG_DVB_CORE is not set
706# CONFIG_DAB is not set 728# CONFIG_VIDEO_MEDIA is not set
707 729
708# 730#
709# Graphics support 731# Multimedia drivers
710# 732#
711# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 733# CONFIG_DAB is not set
712 734
713# 735#
714# Display device support 736# Graphics support
715# 737#
716# CONFIG_DISPLAY_SUPPORT is not set
717# CONFIG_VGASTATE is not set 738# CONFIG_VGASTATE is not set
739# CONFIG_VIDEO_OUTPUT_CONTROL is not set
718# CONFIG_FB is not set 740# CONFIG_FB is not set
741# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
719 742
720# 743#
721# Sound 744# Display device support
722# 745#
746# CONFIG_DISPLAY_SUPPORT is not set
723# CONFIG_SOUND is not set 747# CONFIG_SOUND is not set
724 748CONFIG_USB_SUPPORT=y
725#
726# USB support
727#
728CONFIG_USB_ARCH_HAS_HCD=y 749CONFIG_USB_ARCH_HAS_HCD=y
729# CONFIG_USB_ARCH_HAS_OHCI is not set 750# CONFIG_USB_ARCH_HAS_OHCI is not set
730# CONFIG_USB_ARCH_HAS_EHCI is not set 751# CONFIG_USB_ARCH_HAS_EHCI is not set
731# CONFIG_USB is not set 752# CONFIG_USB is not set
753# CONFIG_USB_OTG_WHITELIST is not set
754# CONFIG_USB_OTG_BLACKLIST_HUB is not set
732 755
733# 756#
734# Enable Host or Gadget support to see Inventra options 757# Enable Host or Gadget support to see Inventra options
735# 758#
736 759
737# 760#
738# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 761# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
739#
740
741#
742# USB Gadget Support
743# 762#
744# CONFIG_USB_GADGET is not set 763# CONFIG_USB_GADGET is not set
745# CONFIG_MMC is not set 764# CONFIG_MMC is not set
746 765# CONFIG_MEMSTICK is not set
747#
748# LED devices
749#
750# CONFIG_NEW_LEDS is not set 766# CONFIG_NEW_LEDS is not set
751 767# CONFIG_ACCESSIBILITY is not set
752#
753# LED drivers
754#
755
756#
757# LED Triggers
758#
759
760#
761# InfiniBand support
762#
763
764#
765# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
766#
767
768#
769# Real Time Clock
770#
771# CONFIG_RTC_CLASS is not set 768# CONFIG_RTC_CLASS is not set
772 769# CONFIG_DMADEVICES is not set
773# 770# CONFIG_UIO is not set
774# DMA Engine support 771# CONFIG_STAGING is not set
775#
776# CONFIG_DMA_ENGINE is not set
777
778#
779# DMA Clients
780#
781
782#
783# DMA Devices
784#
785
786#
787# PBX support
788#
789# CONFIG_PBX is not set
790 772
791# 773#
792# File systems 774# File systems
@@ -796,20 +778,18 @@ CONFIG_EXT2_FS_XATTR=y
796# CONFIG_EXT2_FS_POSIX_ACL is not set 778# CONFIG_EXT2_FS_POSIX_ACL is not set
797# CONFIG_EXT2_FS_SECURITY is not set 779# CONFIG_EXT2_FS_SECURITY is not set
798# CONFIG_EXT3_FS is not set 780# CONFIG_EXT3_FS is not set
799# CONFIG_EXT4DEV_FS is not set 781# CONFIG_EXT4_FS is not set
800CONFIG_FS_MBCACHE=y 782CONFIG_FS_MBCACHE=y
801# CONFIG_REISERFS_FS is not set 783# CONFIG_REISERFS_FS is not set
802# CONFIG_JFS_FS is not set 784# CONFIG_JFS_FS is not set
803# CONFIG_FS_POSIX_ACL is not set 785# CONFIG_FS_POSIX_ACL is not set
786CONFIG_FILE_LOCKING=y
804# CONFIG_XFS_FS is not set 787# CONFIG_XFS_FS is not set
805# CONFIG_GFS2_FS is not set
806# CONFIG_OCFS2_FS is not set 788# CONFIG_OCFS2_FS is not set
807# CONFIG_MINIX_FS is not set 789# CONFIG_DNOTIFY is not set
808# CONFIG_ROMFS_FS is not set
809CONFIG_INOTIFY=y 790CONFIG_INOTIFY=y
810CONFIG_INOTIFY_USER=y 791CONFIG_INOTIFY_USER=y
811# CONFIG_QUOTA is not set 792# CONFIG_QUOTA is not set
812# CONFIG_DNOTIFY is not set
813# CONFIG_AUTOFS_FS is not set 793# CONFIG_AUTOFS_FS is not set
814# CONFIG_AUTOFS4_FS is not set 794# CONFIG_AUTOFS4_FS is not set
815# CONFIG_FUSE_FS is not set 795# CONFIG_FUSE_FS is not set
@@ -835,7 +815,6 @@ CONFIG_PROC_SYSCTL=y
835CONFIG_SYSFS=y 815CONFIG_SYSFS=y
836# CONFIG_TMPFS is not set 816# CONFIG_TMPFS is not set
837# CONFIG_HUGETLB_PAGE is not set 817# CONFIG_HUGETLB_PAGE is not set
838CONFIG_RAMFS=y
839# CONFIG_CONFIGFS_FS is not set 818# CONFIG_CONFIGFS_FS is not set
840 819
841# 820#
@@ -848,60 +827,53 @@ CONFIG_RAMFS=y
848# CONFIG_BEFS_FS is not set 827# CONFIG_BEFS_FS is not set
849# CONFIG_BFS_FS is not set 828# CONFIG_BFS_FS is not set
850# CONFIG_EFS_FS is not set 829# CONFIG_EFS_FS is not set
851# CONFIG_YAFFS_FS is not set
852# CONFIG_JFFS2_FS is not set 830# CONFIG_JFFS2_FS is not set
831# CONFIG_YAFFS_FS is not set
853# CONFIG_CRAMFS is not set 832# CONFIG_CRAMFS is not set
854# CONFIG_VXFS_FS is not set 833# CONFIG_VXFS_FS is not set
834# CONFIG_MINIX_FS is not set
835# CONFIG_OMFS_FS is not set
855# CONFIG_HPFS_FS is not set 836# CONFIG_HPFS_FS is not set
856# CONFIG_QNX4FS_FS is not set 837# CONFIG_QNX4FS_FS is not set
838# CONFIG_ROMFS_FS is not set
857# CONFIG_SYSV_FS is not set 839# CONFIG_SYSV_FS is not set
858# CONFIG_UFS_FS is not set 840# CONFIG_UFS_FS is not set
859 841# CONFIG_NETWORK_FILESYSTEMS is not set
860#
861# Network File Systems
862#
863# CONFIG_NFS_FS is not set
864# CONFIG_NFSD is not set
865# CONFIG_SMB_FS is not set
866# CONFIG_CIFS is not set
867# CONFIG_NCP_FS is not set
868# CONFIG_CODA_FS is not set
869# CONFIG_AFS_FS is not set
870# CONFIG_9P_FS is not set
871 842
872# 843#
873# Partition Types 844# Partition Types
874# 845#
875# CONFIG_PARTITION_ADVANCED is not set 846# CONFIG_PARTITION_ADVANCED is not set
876CONFIG_MSDOS_PARTITION=y 847CONFIG_MSDOS_PARTITION=y
877
878#
879# Native Language Support
880#
881# CONFIG_NLS is not set 848# CONFIG_NLS is not set
882
883#
884# Distributed Lock Manager
885#
886# CONFIG_DLM is not set 849# CONFIG_DLM is not set
887 850
888# 851#
889# Profiling support
890#
891# CONFIG_PROFILING is not set
892
893#
894# Kernel hacking 852# Kernel hacking
895# 853#
896# CONFIG_PRINTK_TIME is not set 854# CONFIG_PRINTK_TIME is not set
855CONFIG_ENABLE_WARN_DEPRECATED=y
897CONFIG_ENABLE_MUST_CHECK=y 856CONFIG_ENABLE_MUST_CHECK=y
857CONFIG_FRAME_WARN=1024
898# CONFIG_MAGIC_SYSRQ is not set 858# CONFIG_MAGIC_SYSRQ is not set
899# CONFIG_UNUSED_SYMBOLS is not set 859# CONFIG_UNUSED_SYMBOLS is not set
900CONFIG_DEBUG_FS=y 860CONFIG_DEBUG_FS=y
901# CONFIG_HEADERS_CHECK is not set 861# CONFIG_HEADERS_CHECK is not set
862CONFIG_DEBUG_SECTION_MISMATCH=y
902# CONFIG_DEBUG_KERNEL is not set 863# CONFIG_DEBUG_KERNEL is not set
903# CONFIG_DEBUG_BUGVERBOSE is not set 864# CONFIG_DEBUG_BUGVERBOSE is not set
865# CONFIG_DEBUG_MEMORY_INIT is not set
866# CONFIG_RCU_CPU_STALL_DETECTOR is not set
867
868#
869# Tracers
870#
871# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
872# CONFIG_SAMPLES is not set
873CONFIG_HAVE_ARCH_KGDB=y
874CONFIG_DEBUG_VERBOSE=y
904CONFIG_DEBUG_MMRS=y 875CONFIG_DEBUG_MMRS=y
876# CONFIG_DEBUG_DOUBLEFAULT is not set
905CONFIG_DEBUG_HUNT_FOR_ZERO=y 877CONFIG_DEBUG_HUNT_FOR_ZERO=y
906CONFIG_DEBUG_BFIN_HWTRACE_ON=y 878CONFIG_DEBUG_BFIN_HWTRACE_ON=y
907CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y 879CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -919,13 +891,95 @@ CONFIG_ACCESS_CHECK=y
919# 891#
920# CONFIG_KEYS is not set 892# CONFIG_KEYS is not set
921CONFIG_SECURITY=y 893CONFIG_SECURITY=y
894# CONFIG_SECURITYFS is not set
922# CONFIG_SECURITY_NETWORK is not set 895# CONFIG_SECURITY_NETWORK is not set
923CONFIG_SECURITY_CAPABILITIES=y 896# CONFIG_SECURITY_FILE_CAPABILITIES is not set
897CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
898CONFIG_CRYPTO=y
899
900#
901# Crypto core or helper
902#
903# CONFIG_CRYPTO_FIPS is not set
904# CONFIG_CRYPTO_MANAGER is not set
905# CONFIG_CRYPTO_MANAGER2 is not set
906# CONFIG_CRYPTO_GF128MUL is not set
907# CONFIG_CRYPTO_NULL is not set
908# CONFIG_CRYPTO_CRYPTD is not set
909# CONFIG_CRYPTO_AUTHENC is not set
910# CONFIG_CRYPTO_TEST is not set
911
912#
913# Authenticated Encryption with Associated Data
914#
915# CONFIG_CRYPTO_CCM is not set
916# CONFIG_CRYPTO_GCM is not set
917# CONFIG_CRYPTO_SEQIV is not set
918
919#
920# Block modes
921#
922# CONFIG_CRYPTO_CBC is not set
923# CONFIG_CRYPTO_CTR is not set
924# CONFIG_CRYPTO_CTS is not set
925# CONFIG_CRYPTO_ECB is not set
926# CONFIG_CRYPTO_LRW is not set
927# CONFIG_CRYPTO_PCBC is not set
928# CONFIG_CRYPTO_XTS is not set
929
930#
931# Hash modes
932#
933# CONFIG_CRYPTO_HMAC is not set
934# CONFIG_CRYPTO_XCBC is not set
935
936#
937# Digest
938#
939# CONFIG_CRYPTO_CRC32C is not set
940# CONFIG_CRYPTO_MD4 is not set
941# CONFIG_CRYPTO_MD5 is not set
942# CONFIG_CRYPTO_MICHAEL_MIC is not set
943# CONFIG_CRYPTO_RMD128 is not set
944# CONFIG_CRYPTO_RMD160 is not set
945# CONFIG_CRYPTO_RMD256 is not set
946# CONFIG_CRYPTO_RMD320 is not set
947# CONFIG_CRYPTO_SHA1 is not set
948# CONFIG_CRYPTO_SHA256 is not set
949# CONFIG_CRYPTO_SHA512 is not set
950# CONFIG_CRYPTO_TGR192 is not set
951# CONFIG_CRYPTO_WP512 is not set
952
953#
954# Ciphers
955#
956# CONFIG_CRYPTO_AES is not set
957# CONFIG_CRYPTO_ANUBIS is not set
958# CONFIG_CRYPTO_ARC4 is not set
959# CONFIG_CRYPTO_BLOWFISH is not set
960# CONFIG_CRYPTO_CAMELLIA is not set
961# CONFIG_CRYPTO_CAST5 is not set
962# CONFIG_CRYPTO_CAST6 is not set
963# CONFIG_CRYPTO_DES is not set
964# CONFIG_CRYPTO_FCRYPT is not set
965# CONFIG_CRYPTO_KHAZAD is not set
966# CONFIG_CRYPTO_SALSA20 is not set
967# CONFIG_CRYPTO_SEED is not set
968# CONFIG_CRYPTO_SERPENT is not set
969# CONFIG_CRYPTO_TEA is not set
970# CONFIG_CRYPTO_TWOFISH is not set
971
972#
973# Compression
974#
975# CONFIG_CRYPTO_DEFLATE is not set
976# CONFIG_CRYPTO_LZO is not set
924 977
925# 978#
926# Cryptographic options 979# Random Number Generation
927# 980#
928# CONFIG_CRYPTO is not set 981# CONFIG_CRYPTO_ANSI_CPRNG is not set
982CONFIG_CRYPTO_HW=y
929 983
930# 984#
931# Library routines 985# Library routines
@@ -933,11 +987,12 @@ CONFIG_SECURITY_CAPABILITIES=y
933CONFIG_BITREVERSE=y 987CONFIG_BITREVERSE=y
934CONFIG_CRC_CCITT=m 988CONFIG_CRC_CCITT=m
935# CONFIG_CRC16 is not set 989# CONFIG_CRC16 is not set
990# CONFIG_CRC_T10DIF is not set
936# CONFIG_CRC_ITU_T is not set 991# CONFIG_CRC_ITU_T is not set
937CONFIG_CRC32=y 992CONFIG_CRC32=y
993# CONFIG_CRC7 is not set
938# CONFIG_LIBCRC32C is not set 994# CONFIG_LIBCRC32C is not set
939CONFIG_ZLIB_INFLATE=y 995CONFIG_ZLIB_INFLATE=y
940CONFIG_PLIST=y
941CONFIG_HAS_IOMEM=y 996CONFIG_HAS_IOMEM=y
942CONFIG_HAS_IOPORT=y 997CONFIG_HAS_IOPORT=y
943CONFIG_HAS_DMA=y 998CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 569523c1c034..80211303f6b9 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -49,7 +49,7 @@ CONFIG_LOG_BUF_SHIFT=14
49# CONFIG_SYSCTL is not set 49# CONFIG_SYSCTL is not set
50CONFIG_EMBEDDED=y 50CONFIG_EMBEDDED=y
51# CONFIG_UID16 is not set 51# CONFIG_UID16 is not set
52CONFIG_SYSCTL_SYSCALL=y 52# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 53CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 54# CONFIG_KALLSYMS_EXTRA_PASS is not set
55# CONFIG_HOTPLUG is not set 55# CONFIG_HOTPLUG is not set
@@ -355,7 +355,7 @@ CONFIG_IP_FIB_HASH=y
355# CONFIG_NET_IPIP is not set 355# CONFIG_NET_IPIP is not set
356# CONFIG_NET_IPGRE is not set 356# CONFIG_NET_IPGRE is not set
357# CONFIG_ARPD is not set 357# CONFIG_ARPD is not set
358CONFIG_SYN_COOKIES=y 358# CONFIG_SYN_COOKIES is not set
359# CONFIG_INET_AH is not set 359# CONFIG_INET_AH is not set
360# CONFIG_INET_ESP is not set 360# CONFIG_INET_ESP is not set
361# CONFIG_INET_IPCOMP is not set 361# CONFIG_INET_IPCOMP is not set
@@ -556,9 +556,9 @@ CONFIG_SMC91X=y
556# CONFIG_BFIN_MAC is not set 556# CONFIG_BFIN_MAC is not set
557# CONFIG_SMSC911X is not set 557# CONFIG_SMSC911X is not set
558# CONFIG_DM9000 is not set 558# CONFIG_DM9000 is not set
559CONFIG_NETDEV_1000=y 559# CONFIG_NETDEV_1000 is not set
560# CONFIG_AX88180 is not set 560# CONFIG_AX88180 is not set
561CONFIG_NETDEV_10000=y 561# CONFIG_NETDEV_10000 is not set
562 562
563# 563#
564# Wireless LAN 564# Wireless LAN
@@ -652,6 +652,10 @@ CONFIG_UNIX98_PTYS=y
652# CONFIG_TCG_TPM is not set 652# CONFIG_TCG_TPM is not set
653# CONFIG_I2C is not set 653# CONFIG_I2C is not set
654 654
655CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
656CONFIG_GPIOLIB=y
657CONFIG_GPIO_SYSFS=y
658
655# 659#
656# SPI support 660# SPI support
657# 661#
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 035b635e599c..dd815f0d1517 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -49,7 +49,7 @@ CONFIG_INITRAMFS_SOURCE=""
49# CONFIG_SYSCTL is not set 49# CONFIG_SYSCTL is not set
50CONFIG_EMBEDDED=y 50CONFIG_EMBEDDED=y
51CONFIG_UID16=y 51CONFIG_UID16=y
52CONFIG_SYSCTL_SYSCALL=y 52# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 53CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 54# CONFIG_KALLSYMS_EXTRA_PASS is not set
55CONFIG_HOTPLUG=y 55CONFIG_HOTPLUG=y
@@ -125,9 +125,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
125CONFIG_BF548=y 125CONFIG_BF548=y
126# CONFIG_BF549 is not set 126# CONFIG_BF549 is not set
127# CONFIG_BF561 is not set 127# CONFIG_BF561 is not set
128CONFIG_BF_REV_0_0=y 128# CONFIG_BF_REV_0_0 is not set
129# CONFIG_BF_REV_0_1 is not set 129# CONFIG_BF_REV_0_1 is not set
130# CONFIG_BF_REV_0_2 is not set 130CONFIG_BF_REV_0_2=y
131# CONFIG_BF_REV_0_3 is not set 131# CONFIG_BF_REV_0_3 is not set
132# CONFIG_BF_REV_0_4 is not set 132# CONFIG_BF_REV_0_4 is not set
133# CONFIG_BF_REV_0_5 is not set 133# CONFIG_BF_REV_0_5 is not set
@@ -422,7 +422,7 @@ CONFIG_IP_PNP=y
422# CONFIG_NET_IPIP is not set 422# CONFIG_NET_IPIP is not set
423# CONFIG_NET_IPGRE is not set 423# CONFIG_NET_IPGRE is not set
424# CONFIG_ARPD is not set 424# CONFIG_ARPD is not set
425CONFIG_SYN_COOKIES=y 425# CONFIG_SYN_COOKIES is not set
426# CONFIG_INET_AH is not set 426# CONFIG_INET_AH is not set
427# CONFIG_INET_ESP is not set 427# CONFIG_INET_ESP is not set
428# CONFIG_INET_IPCOMP is not set 428# CONFIG_INET_IPCOMP is not set
@@ -811,6 +811,10 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
811# CONFIG_I2C_DEBUG_BUS is not set 811# CONFIG_I2C_DEBUG_BUS is not set
812# CONFIG_I2C_DEBUG_CHIP is not set 812# CONFIG_I2C_DEBUG_CHIP is not set
813 813
814CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
815CONFIG_GPIOLIB=y
816CONFIG_GPIO_SYSFS=y
817
814# 818#
815# SPI support 819# SPI support
816# 820#
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 7015e42ccce5..16c198bd40c5 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -49,7 +49,7 @@ CONFIG_FAIR_USER_SCHED=y
49# CONFIG_SYSCTL is not set 49# CONFIG_SYSCTL is not set
50CONFIG_EMBEDDED=y 50CONFIG_EMBEDDED=y
51# CONFIG_UID16 is not set 51# CONFIG_UID16 is not set
52CONFIG_SYSCTL_SYSCALL=y 52# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 53CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 54# CONFIG_KALLSYMS_EXTRA_PASS is not set
55# CONFIG_HOTPLUG is not set 55# CONFIG_HOTPLUG is not set
@@ -389,7 +389,7 @@ CONFIG_IP_FIB_HASH=y
389# CONFIG_NET_IPIP is not set 389# CONFIG_NET_IPIP is not set
390# CONFIG_NET_IPGRE is not set 390# CONFIG_NET_IPGRE is not set
391# CONFIG_ARPD is not set 391# CONFIG_ARPD is not set
392CONFIG_SYN_COOKIES=y 392# CONFIG_SYN_COOKIES is not set
393# CONFIG_INET_AH is not set 393# CONFIG_INET_AH is not set
394# CONFIG_INET_ESP is not set 394# CONFIG_INET_ESP is not set
395# CONFIG_INET_IPCOMP is not set 395# CONFIG_INET_IPCOMP is not set
@@ -569,9 +569,9 @@ CONFIG_SMC91X=y
569# CONFIG_IBM_NEW_EMAC_TAH is not set 569# CONFIG_IBM_NEW_EMAC_TAH is not set
570# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 570# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
571# CONFIG_B44 is not set 571# CONFIG_B44 is not set
572CONFIG_NETDEV_1000=y 572# CONFIG_NETDEV_1000 is not set
573# CONFIG_AX88180 is not set 573# CONFIG_AX88180 is not set
574CONFIG_NETDEV_10000=y 574# CONFIG_NETDEV_10000 is not set
575 575
576# 576#
577# Wireless LAN 577# Wireless LAN
@@ -646,6 +646,10 @@ CONFIG_UNIX98_PTYS=y
646# CONFIG_TCG_TPM is not set 646# CONFIG_TCG_TPM is not set
647# CONFIG_I2C is not set 647# CONFIG_I2C is not set
648 648
649CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
650CONFIG_GPIOLIB=y
651CONFIG_GPIO_SYSFS=y
652
649# 653#
650# SPI support 654# SPI support
651# 655#
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index dfc8e1ddd77a..6b4c1a982383 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -48,7 +48,7 @@ CONFIG_SYSFS_DEPRECATED=y
48# CONFIG_SYSCTL is not set 48# CONFIG_SYSCTL is not set
49CONFIG_EMBEDDED=y 49CONFIG_EMBEDDED=y
50CONFIG_UID16=y 50CONFIG_UID16=y
51CONFIG_SYSCTL_SYSCALL=y 51# CONFIG_SYSCTL_SYSCALL is not set
52CONFIG_KALLSYMS=y 52CONFIG_KALLSYMS=y
53# CONFIG_KALLSYMS_EXTRA_PASS is not set 53# CONFIG_KALLSYMS_EXTRA_PASS is not set
54CONFIG_HOTPLUG=y 54CONFIG_HOTPLUG=y
@@ -347,7 +347,7 @@ CONFIG_IP_PNP=y
347# CONFIG_NET_IPIP is not set 347# CONFIG_NET_IPIP is not set
348# CONFIG_NET_IPGRE is not set 348# CONFIG_NET_IPGRE is not set
349# CONFIG_ARPD is not set 349# CONFIG_ARPD is not set
350CONFIG_SYN_COOKIES=y 350# CONFIG_SYN_COOKIES is not set
351# CONFIG_INET_AH is not set 351# CONFIG_INET_AH is not set
352# CONFIG_INET_ESP is not set 352# CONFIG_INET_ESP is not set
353# CONFIG_INET_IPCOMP is not set 353# CONFIG_INET_IPCOMP is not set
@@ -594,8 +594,8 @@ CONFIG_MII=y
594# CONFIG_SMC91X is not set 594# CONFIG_SMC91X is not set
595# CONFIG_SMSC911X is not set 595# CONFIG_SMSC911X is not set
596CONFIG_DM9000=y 596CONFIG_DM9000=y
597CONFIG_NETDEV_1000=y 597# CONFIG_NETDEV_1000 is not set
598CONFIG_NETDEV_10000=y 598# CONFIG_NETDEV_10000 is not set
599# CONFIG_AX88180 is not set 599# CONFIG_AX88180 is not set
600 600
601# 601#
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 95a5f91aebaa..1ec9ae2e964b 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -49,7 +49,7 @@ CONFIG_INITRAMFS_SOURCE=""
49# CONFIG_SYSCTL is not set 49# CONFIG_SYSCTL is not set
50CONFIG_EMBEDDED=y 50CONFIG_EMBEDDED=y
51CONFIG_UID16=y 51CONFIG_UID16=y
52CONFIG_SYSCTL_SYSCALL=y 52# CONFIG_SYSCTL_SYSCALL is not set
53CONFIG_KALLSYMS=y 53CONFIG_KALLSYMS=y
54# CONFIG_KALLSYMS_EXTRA_PASS is not set 54# CONFIG_KALLSYMS_EXTRA_PASS is not set
55# CONFIG_HOTPLUG is not set 55# CONFIG_HOTPLUG is not set
@@ -355,7 +355,7 @@ CONFIG_IP_PNP=y
355# CONFIG_NET_IPIP is not set 355# CONFIG_NET_IPIP is not set
356# CONFIG_NET_IPGRE is not set 356# CONFIG_NET_IPGRE is not set
357# CONFIG_ARPD is not set 357# CONFIG_ARPD is not set
358CONFIG_SYN_COOKIES=y 358# CONFIG_SYN_COOKIES is not set
359# CONFIG_INET_AH is not set 359# CONFIG_INET_AH is not set
360# CONFIG_INET_ESP is not set 360# CONFIG_INET_ESP is not set
361# CONFIG_INET_IPCOMP is not set 361# CONFIG_INET_IPCOMP is not set
@@ -672,9 +672,9 @@ CONFIG_MII=y
672# CONFIG_SMC91X is not set 672# CONFIG_SMC91X is not set
673# CONFIG_SMSC911X is not set 673# CONFIG_SMSC911X is not set
674CONFIG_DM9000=y 674CONFIG_DM9000=y
675CONFIG_NETDEV_1000=y 675# CONFIG_NETDEV_1000 is not set
676# CONFIG_AX88180 is not set 676# CONFIG_AX88180 is not set
677CONFIG_NETDEV_10000=y 677# CONFIG_NETDEV_10000 is not set
678 678
679# 679#
680# Wireless LAN 680# Wireless LAN
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 78e24080e7f1..09701f907e9b 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -1,6 +1,6 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.7 3# Linux kernel version: 2.6.28.10
4# 4#
5# CONFIG_MMU is not set 5# CONFIG_MMU is not set
6# CONFIG_FPU is not set 6# CONFIG_FPU is not set
@@ -40,26 +40,26 @@ CONFIG_LOG_BUF_SHIFT=14
40# CONFIG_NAMESPACES is not set 40# CONFIG_NAMESPACES is not set
41# CONFIG_BLK_DEV_INITRD is not set 41# CONFIG_BLK_DEV_INITRD is not set
42# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 42# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
43# CONFIG_SYSCTL is not set 43CONFIG_SYSCTL=y
44CONFIG_ANON_INODES=y
44CONFIG_EMBEDDED=y 45CONFIG_EMBEDDED=y
45CONFIG_UID16=y 46CONFIG_UID16=y
46CONFIG_SYSCTL_SYSCALL=y 47# CONFIG_SYSCTL_SYSCALL is not set
47CONFIG_KALLSYMS=y 48CONFIG_KALLSYMS=y
48# CONFIG_KALLSYMS_EXTRA_PASS is not set 49# CONFIG_KALLSYMS_EXTRA_PASS is not set
49CONFIG_HOTPLUG=y 50CONFIG_HOTPLUG=y
50CONFIG_PRINTK=y 51CONFIG_PRINTK=y
51CONFIG_BUG=y 52CONFIG_BUG=y
52# CONFIG_ELF_CORE is not set 53# CONFIG_ELF_CORE is not set
53CONFIG_COMPAT_BRK=y
54CONFIG_BASE_FULL=y 54CONFIG_BASE_FULL=y
55# CONFIG_FUTEX is not set 55# CONFIG_FUTEX is not set
56CONFIG_ANON_INODES=y
57CONFIG_EPOLL=y 56CONFIG_EPOLL=y
58CONFIG_SIGNALFD=y 57CONFIG_SIGNALFD=y
59CONFIG_TIMERFD=y 58CONFIG_TIMERFD=y
60CONFIG_EVENTFD=y 59CONFIG_EVENTFD=y
61# CONFIG_AIO is not set 60# CONFIG_AIO is not set
62CONFIG_VM_EVENT_COUNTERS=y 61CONFIG_VM_EVENT_COUNTERS=y
62CONFIG_COMPAT_BRK=y
63CONFIG_SLAB=y 63CONFIG_SLAB=y
64# CONFIG_SLUB is not set 64# CONFIG_SLUB is not set
65# CONFIG_SLOB is not set 65# CONFIG_SLOB is not set
@@ -68,7 +68,6 @@ CONFIG_SLAB=y
68CONFIG_HAVE_OPROFILE=y 68CONFIG_HAVE_OPROFILE=y
69# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 69# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
70CONFIG_SLABINFO=y 70CONFIG_SLABINFO=y
71CONFIG_RT_MUTEXES=y
72CONFIG_TINY_SHMEM=y 71CONFIG_TINY_SHMEM=y
73CONFIG_BASE_SMALL=0 72CONFIG_BASE_SMALL=0
74CONFIG_MODULES=y 73CONFIG_MODULES=y
@@ -229,7 +228,10 @@ CONFIG_HZ=250
229# CONFIG_SCHED_HRTICK is not set 228# CONFIG_SCHED_HRTICK is not set
230CONFIG_GENERIC_TIME=y 229CONFIG_GENERIC_TIME=y
231CONFIG_GENERIC_CLOCKEVENTS=y 230CONFIG_GENERIC_CLOCKEVENTS=y
231# CONFIG_TICKSOURCE_GPTMR0 is not set
232CONFIG_TICKSOURCE_CORETMR=y
232# CONFIG_CYCLES_CLOCKSOURCE is not set 233# CONFIG_CYCLES_CLOCKSOURCE is not set
234# CONFIG_GPTMR0_CLOCKSOURCE is not set
233# CONFIG_NO_HZ is not set 235# CONFIG_NO_HZ is not set
234# CONFIG_HIGH_RES_TIMERS is not set 236# CONFIG_HIGH_RES_TIMERS is not set
235CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 237CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -374,7 +376,7 @@ CONFIG_IP_PNP=y
374# CONFIG_NET_IPIP is not set 376# CONFIG_NET_IPIP is not set
375# CONFIG_NET_IPGRE is not set 377# CONFIG_NET_IPGRE is not set
376# CONFIG_ARPD is not set 378# CONFIG_ARPD is not set
377CONFIG_SYN_COOKIES=y 379# CONFIG_SYN_COOKIES is not set
378# CONFIG_INET_AH is not set 380# CONFIG_INET_AH is not set
379# CONFIG_INET_ESP is not set 381# CONFIG_INET_ESP is not set
380# CONFIG_INET_IPCOMP is not set 382# CONFIG_INET_IPCOMP is not set
@@ -598,9 +600,8 @@ CONFIG_BFIN_MAC_RMII=y
598# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 600# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
599# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 601# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
600# CONFIG_B44 is not set 602# CONFIG_B44 is not set
601CONFIG_NETDEV_1000=y 603# CONFIG_NETDEV_1000 is not set
602# CONFIG_AX88180 is not set 604# CONFIG_NETDEV_10000 is not set
603CONFIG_NETDEV_10000=y
604 605
605# 606#
606# Wireless LAN 607# Wireless LAN
@@ -640,11 +641,11 @@ CONFIG_INPUT_EVDEV=y
640# CONFIG_INPUT_JOYSTICK is not set 641# CONFIG_INPUT_JOYSTICK is not set
641# CONFIG_INPUT_TABLET is not set 642# CONFIG_INPUT_TABLET is not set
642CONFIG_INPUT_TOUCHSCREEN=y 643CONFIG_INPUT_TOUCHSCREEN=y
643# CONFIG_TOUCHSCREEN_ADS7846 is not set
644CONFIG_TOUCHSCREEN_AD7877=y 644CONFIG_TOUCHSCREEN_AD7877=y
645# CONFIG_TOUCHSCREEN_AD7879_I2C is not set 645# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
646# CONFIG_TOUCHSCREEN_AD7879_SPI is not set 646# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
647# CONFIG_TOUCHSCREEN_AD7879 is not set 647# CONFIG_TOUCHSCREEN_AD7879 is not set
648# CONFIG_TOUCHSCREEN_ADS7846 is not set
648# CONFIG_TOUCHSCREEN_FUJITSU is not set 649# CONFIG_TOUCHSCREEN_FUJITSU is not set
649# CONFIG_TOUCHSCREEN_GUNZE is not set 650# CONFIG_TOUCHSCREEN_GUNZE is not set
650# CONFIG_TOUCHSCREEN_ELO is not set 651# CONFIG_TOUCHSCREEN_ELO is not set
@@ -676,14 +677,14 @@ CONFIG_INPUT_UINPUT=y
676# Character devices 677# Character devices
677# 678#
678# CONFIG_AD9960 is not set 679# CONFIG_AD9960 is not set
679# CONFIG_SPI_ADC_BF533 is not set 680CONFIG_BFIN_DMA_INTERFACE=m
680# CONFIG_BF5xx_PPIFCD is not set 681# CONFIG_BFIN_PPI is not set
682# CONFIG_BFIN_PPIFCD is not set
681# CONFIG_BFIN_SIMPLE_TIMER is not set 683# CONFIG_BFIN_SIMPLE_TIMER is not set
682# CONFIG_BF5xx_PPI is not set 684# CONFIG_BFIN_SPI_ADC is not set
683CONFIG_BFIN_SPORT=y 685CONFIG_BFIN_SPORT=y
684# CONFIG_BFIN_TIMER_LATENCY is not set 686# CONFIG_BFIN_TIMER_LATENCY is not set
685CONFIG_TWI_LCD=m 687# CONFIG_BFIN_TWI_LCD is not set
686CONFIG_BFIN_DMA_INTERFACE=m
687# CONFIG_SIMPLE_GPIO is not set 688# CONFIG_SIMPLE_GPIO is not set
688# CONFIG_VT is not set 689# CONFIG_VT is not set
689CONFIG_DEVKMEM=y 690CONFIG_DEVKMEM=y
@@ -796,6 +797,7 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
796# CONFIG_POWER_SUPPLY is not set 797# CONFIG_POWER_SUPPLY is not set
797CONFIG_HWMON=y 798CONFIG_HWMON=y
798# CONFIG_HWMON_VID is not set 799# CONFIG_HWMON_VID is not set
800# CONFIG_SENSORS_AD5252 is not set
799# CONFIG_SENSORS_AD7414 is not set 801# CONFIG_SENSORS_AD7414 is not set
800# CONFIG_SENSORS_AD7418 is not set 802# CONFIG_SENSORS_AD7418 is not set
801# CONFIG_SENSORS_ADCXX is not set 803# CONFIG_SENSORS_ADCXX is not set
@@ -867,6 +869,7 @@ CONFIG_SSB_POSSIBLE=y
867# CONFIG_HTC_PASIC3 is not set 869# CONFIG_HTC_PASIC3 is not set
868# CONFIG_MFD_TMIO is not set 870# CONFIG_MFD_TMIO is not set
869# CONFIG_PMIC_DA903X is not set 871# CONFIG_PMIC_DA903X is not set
872# CONFIG_PMIC_ADP5520 is not set
870# CONFIG_MFD_WM8400 is not set 873# CONFIG_MFD_WM8400 is not set
871# CONFIG_MFD_WM8350_I2C is not set 874# CONFIG_MFD_WM8350_I2C is not set
872# CONFIG_REGULATOR is not set 875# CONFIG_REGULATOR is not set
@@ -1111,6 +1114,7 @@ CONFIG_SYSFS=y
1111# CONFIG_BEFS_FS is not set 1114# CONFIG_BEFS_FS is not set
1112# CONFIG_BFS_FS is not set 1115# CONFIG_BFS_FS is not set
1113# CONFIG_EFS_FS is not set 1116# CONFIG_EFS_FS is not set
1117# CONFIG_JFFS2_FS is not set
1114CONFIG_YAFFS_FS=y 1118CONFIG_YAFFS_FS=y
1115CONFIG_YAFFS_YAFFS1=y 1119CONFIG_YAFFS_YAFFS1=y
1116# CONFIG_YAFFS_9BYTE_TAGS is not set 1120# CONFIG_YAFFS_9BYTE_TAGS is not set
@@ -1121,7 +1125,6 @@ CONFIG_YAFFS_AUTO_YAFFS2=y
1121# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set 1125# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1122# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set 1126# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1123CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y 1127CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1124# CONFIG_JFFS2_FS is not set
1125# CONFIG_CRAMFS is not set 1128# CONFIG_CRAMFS is not set
1126# CONFIG_VXFS_FS is not set 1129# CONFIG_VXFS_FS is not set
1127# CONFIG_MINIX_FS is not set 1130# CONFIG_MINIX_FS is not set
@@ -1213,7 +1216,6 @@ CONFIG_FRAME_WARN=1024
1213# CONFIG_DEBUG_BUGVERBOSE is not set 1216# CONFIG_DEBUG_BUGVERBOSE is not set
1214# CONFIG_DEBUG_MEMORY_INIT is not set 1217# CONFIG_DEBUG_MEMORY_INIT is not set
1215# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1218# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1216# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1217 1219
1218# 1220#
1219# Tracers 1221# Tracers
@@ -1343,7 +1345,6 @@ CONFIG_CRC32=y
1343# CONFIG_CRC7 is not set 1345# CONFIG_CRC7 is not set
1344# CONFIG_LIBCRC32C is not set 1346# CONFIG_LIBCRC32C is not set
1345CONFIG_ZLIB_INFLATE=y 1347CONFIG_ZLIB_INFLATE=y
1346CONFIG_PLIST=y
1347CONFIG_HAS_IOMEM=y 1348CONFIG_HAS_IOMEM=y
1348CONFIG_HAS_IOPORT=y 1349CONFIG_HAS_IOPORT=y
1349CONFIG_HAS_DMA=y 1350CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index 2bc0779d22ea..ec84a53daae9 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -52,7 +52,7 @@ CONFIG_INITRAMFS_SOURCE=""
52# CONFIG_SYSCTL is not set 52# CONFIG_SYSCTL is not set
53CONFIG_EMBEDDED=y 53CONFIG_EMBEDDED=y
54CONFIG_UID16=y 54CONFIG_UID16=y
55CONFIG_SYSCTL_SYSCALL=y 55# CONFIG_SYSCTL_SYSCALL is not set
56CONFIG_KALLSYMS=y 56CONFIG_KALLSYMS=y
57CONFIG_KALLSYMS_ALL=y 57CONFIG_KALLSYMS_ALL=y
58# CONFIG_KALLSYMS_EXTRA_PASS is not set 58# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -373,7 +373,7 @@ CONFIG_IP_PNP=y
373# CONFIG_NET_IPIP is not set 373# CONFIG_NET_IPIP is not set
374# CONFIG_NET_IPGRE is not set 374# CONFIG_NET_IPGRE is not set
375# CONFIG_ARPD is not set 375# CONFIG_ARPD is not set
376CONFIG_SYN_COOKIES=y 376# CONFIG_SYN_COOKIES is not set
377# CONFIG_INET_AH is not set 377# CONFIG_INET_AH is not set
378# CONFIG_INET_ESP is not set 378# CONFIG_INET_ESP is not set
379# CONFIG_INET_IPCOMP is not set 379# CONFIG_INET_IPCOMP is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index e65b3a49214f..6e2796240fdc 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -42,7 +42,7 @@ CONFIG_LOG_BUF_SHIFT=14
42# CONFIG_SYSCTL is not set 42# CONFIG_SYSCTL is not set
43CONFIG_EMBEDDED=y 43CONFIG_EMBEDDED=y
44# CONFIG_UID16 is not set 44# CONFIG_UID16 is not set
45CONFIG_SYSCTL_SYSCALL=y 45# CONFIG_SYSCTL_SYSCALL is not set
46CONFIG_KALLSYMS=y 46CONFIG_KALLSYMS=y
47# CONFIG_KALLSYMS_EXTRA_PASS is not set 47# CONFIG_KALLSYMS_EXTRA_PASS is not set
48# CONFIG_HOTPLUG is not set 48# CONFIG_HOTPLUG is not set
@@ -537,7 +537,30 @@ CONFIG_SPI_BFIN=y
537# CONFIG_SPI_SPIDEV is not set 537# CONFIG_SPI_SPIDEV is not set
538# CONFIG_SPI_TLE62X0 is not set 538# CONFIG_SPI_TLE62X0 is not set
539CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 539CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
540# CONFIG_GPIOLIB is not set 540CONFIG_GPIOLIB=y
541# CONFIG_DEBUG_GPIO is not set
542CONFIG_GPIO_SYSFS=y
543
544#
545# Memory mapped GPIO expanders:
546#
547
548#
549# I2C GPIO expanders:
550#
551# CONFIG_GPIO_MAX732X is not set
552# CONFIG_GPIO_PCA953X is not set
553# CONFIG_GPIO_PCF857X is not set
554
555#
556# PCI GPIO expanders:
557#
558
559#
560# SPI GPIO expanders:
561#
562# CONFIG_GPIO_MAX7301 is not set
563# CONFIG_GPIO_MCP23S08 is not set
541# CONFIG_W1 is not set 564# CONFIG_W1 is not set
542# CONFIG_POWER_SUPPLY is not set 565# CONFIG_POWER_SUPPLY is not set
543# CONFIG_HWMON is not set 566# CONFIG_HWMON is not set
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 1b040f5b4feb..94697f0f6f40 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -30,7 +30,8 @@
30#ifndef _BLACKFIN_CACHEFLUSH_H 30#ifndef _BLACKFIN_CACHEFLUSH_H
31#define _BLACKFIN_CACHEFLUSH_H 31#define _BLACKFIN_CACHEFLUSH_H
32 32
33extern void blackfin_icache_dcache_flush_range(unsigned long start_address, unsigned long end_address); 33#include <asm/blackfin.h> /* for SSYNC() */
34
34extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address); 35extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
35extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address); 36extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
36extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); 37extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
@@ -54,32 +55,28 @@ extern void blackfin_invalidate_entire_dcache(void);
54 55
55static inline void flush_icache_range(unsigned start, unsigned end) 56static inline void flush_icache_range(unsigned start, unsigned end)
56{ 57{
57#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE) 58#if defined(CONFIG_BFIN_WB)
58 59 blackfin_dcache_flush_range(start, end);
59# if defined(CONFIG_BFIN_WT) 60#endif
60 blackfin_icache_flush_range((start), (end));
61 flush_icache_range_others(start, end);
62# else
63 blackfin_icache_dcache_flush_range((start), (end));
64# endif
65
66#else
67 61
68# if defined(CONFIG_BFIN_ICACHE) 62 /* Make sure all write buffers in the data side of the core
69 blackfin_icache_flush_range((start), (end)); 63 * are flushed before trying to invalidate the icache. This
64 * needs to be after the data flush and before the icache
65 * flush so that the SSYNC does the right thing in preventing
66 * the instruction prefetcher from hitting things in cached
67 * memory at the wrong time -- it runs much further ahead than
68 * the pipeline.
69 */
70 SSYNC();
71#if defined(CONFIG_BFIN_ICACHE)
72 blackfin_icache_flush_range(start, end);
70 flush_icache_range_others(start, end); 73 flush_icache_range_others(start, end);
71# endif
72# if defined(CONFIG_BFIN_DCACHE)
73 blackfin_dcache_flush_range((start), (end));
74# endif
75
76#endif 74#endif
77} 75}
78 76
79#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 77#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
80do { memcpy(dst, src, len); \ 78do { memcpy(dst, src, len); \
81 flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \ 79 flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
82 flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
83} while (0) 80} while (0)
84 81
85#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) 82#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len)
@@ -111,6 +108,11 @@ static inline int bfin_addr_dcachable(unsigned long addr)
111 addr >= _ramend && addr < physical_mem_end) 108 addr >= _ramend && addr < physical_mem_end)
112 return 1; 109 return 1;
113 110
111#ifndef CONFIG_BFIN_L2_NOT_CACHED
112 if (addr >= L2_START && addr < L2_START + L2_LENGTH)
113 return 1;
114#endif
115
114 return 0; 116 return 0;
115} 117}
116 118
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index ad566ff9ad16..a75a6a9f0949 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -53,29 +53,32 @@
53#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON) 53#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
54#endif 54#endif
55 55
56#define SDRAM_DNON_CHBL (CPLB_COMMON)
57#define SDRAM_EBIU (CPLB_COMMON)
58#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
59
56#define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON) 60#define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON)
57 61
58#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
59#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB) 63#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
60#define L2_IMEMORY (CPLB_COMMON | CPLB_LOCK) 64#define L2_IMEMORY (CPLB_COMMON)
61#define L2_DMEMORY (CPLB_COMMON | CPLB_LOCK) 65#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON)
62 66
63#else 67#else
64#ifdef CONFIG_BFIN_L2_CACHEABLE 68#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
65#define L2_IMEMORY (SDRAM_IGENERIC) 69#define L2_IMEMORY (SDRAM_IGENERIC)
66#define L2_DMEMORY (SDRAM_DGENERIC) 70
67#else 71# if defined(CONFIG_BFIN_L2_WB)
68#define L2_IMEMORY (CPLB_COMMON) 72# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON)
69#define L2_DMEMORY (CPLB_COMMON) 73# elif defined(CONFIG_BFIN_L2_WT)
70#endif /* CONFIG_BFIN_L2_CACHEABLE */ 74# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
71 75# elif defined(CONFIG_BFIN_L2_NOT_CACHED)
72#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB) 76# define L2_DMEMORY (CPLB_COMMON)
77# else
78# define L2_DMEMORY (0)
79# endif
73#endif /* CONFIG_SMP */ 80#endif /* CONFIG_SMP */
74 81
75#define SDRAM_DNON_CHBL (CPLB_COMMON)
76#define SDRAM_EBIU (CPLB_COMMON)
77#define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
78
79#define SIZE_1K 0x00000400 /* 1K */ 82#define SIZE_1K 0x00000400 /* 1K */
80#define SIZE_4K 0x00001000 /* 4K */ 83#define SIZE_4K 0x00001000 /* 4K */
81#define SIZE_1M 0x00100000 /* 1M */ 84#define SIZE_1M 0x00100000 /* 1M */
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index e4f7b8043f02..c9a59622e23f 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -206,10 +206,16 @@ static inline unsigned long get_dma_curr_addr(unsigned int channel)
206 206
207static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize) 207static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize)
208{ 208{
209 /* Make sure the internal data buffers in the core are drained
210 * so that the DMA descriptors are completely written when the
211 * DMA engine goes to fetch them below.
212 */
213 SSYNC();
214
215 dma_ch[channel].regs->next_desc_ptr = sg;
209 dma_ch[channel].regs->cfg = 216 dma_ch[channel].regs->cfg =
210 (dma_ch[channel].regs->cfg & ~(0xf << 8)) | 217 (dma_ch[channel].regs->cfg & ~(0xf << 8)) |
211 ((ndsize & 0xf) << 8); 218 ((ndsize & 0xf) << 8);
212 dma_ch[channel].regs->next_desc_ptr = sg;
213} 219}
214 220
215static inline int dma_channel_active(unsigned int channel) 221static inline int dma_channel_active(unsigned int channel)
@@ -253,5 +259,7 @@ static inline void clear_dma_irqstat(unsigned int channel)
253void *dma_memcpy(void *dest, const void *src, size_t count); 259void *dma_memcpy(void *dest, const void *src, size_t count);
254void *safe_dma_memcpy(void *dest, const void *src, size_t count); 260void *safe_dma_memcpy(void *dest, const void *src, size_t count);
255void blackfin_dma_early_init(void); 261void blackfin_dma_early_init(void);
262void early_dma_memcpy(void *dest, const void *src, size_t count);
263void early_dma_memcpy_done(void);
256 264
257#endif 265#endif
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index cdbfcfc30f6a..230e1605d3fb 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -55,50 +55,50 @@ do { \
55#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC 55#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
56#define ELF_EXEC_PAGESIZE 4096 56#define ELF_EXEC_PAGESIZE 4096
57 57
58#define R_unused0 0 /* relocation type 0 is not defined */ 58#define R_BFIN_UNUSED0 0 /* relocation type 0 is not defined */
59#define R_pcrel5m2 1 /*LSETUP part a */ 59#define R_BFIN_PCREL5M2 1 /* LSETUP part a */
60#define R_unused1 2 /* relocation type 2 is not defined */ 60#define R_BFIN_UNUSED1 2 /* relocation type 2 is not defined */
61#define R_pcrel10 3 /* type 3, if cc jump <target> */ 61#define R_BFIN_PCREL10 3 /* type 3, if cc jump <target> */
62#define R_pcrel12_jump 4 /* type 4, jump <target> */ 62#define R_BFIN_PCREL12_JUMP 4 /* type 4, jump <target> */
63#define R_rimm16 5 /* type 0x5, rN = <target> */ 63#define R_BFIN_RIMM16 5 /* type 0x5, rN = <target> */
64#define R_luimm16 6 /* # 0x6, preg.l=<target> Load imm 16 to lower half */ 64#define R_BFIN_LUIMM16 6 /* # 0x6, preg.l=<target> Load imm 16 to lower half */
65#define R_huimm16 7 /* # 0x7, preg.h=<target> Load imm 16 to upper half */ 65#define R_BFIN_HUIMM16 7 /* # 0x7, preg.h=<target> Load imm 16 to upper half */
66#define R_pcrel12_jump_s 8 /* # 0x8 jump.s <target> */ 66#define R_BFIN_PCREL12_JUMP_S 8 /* # 0x8 jump.s <target> */
67#define R_pcrel24_jump_x 9 /* # 0x9 jump.x <target> */ 67#define R_BFIN_PCREL24_JUMP_X 9 /* # 0x9 jump.x <target> */
68#define R_pcrel24 10 /* # 0xa call <target> , not expandable */ 68#define R_BFIN_PCREL24 10 /* # 0xa call <target> , not expandable */
69#define R_unusedb 11 /* # 0xb not generated */ 69#define R_BFIN_UNUSEDB 11 /* # 0xb not generated */
70#define R_unusedc 12 /* # 0xc not used */ 70#define R_BFIN_UNUSEDC 12 /* # 0xc not used */
71#define R_pcrel24_jump_l 13 /*0xd jump.l <target> */ 71#define R_BFIN_PCREL24_JUMP_L 13 /* 0xd jump.l <target> */
72#define R_pcrel24_call_x 14 /* 0xE, call.x <target> if <target> is above 24 bit limit call through P1 */ 72#define R_BFIN_PCREL24_CALL_X 14 /* 0xE, call.x <target> if <target> is above 24 bit limit call through P1 */
73#define R_var_eq_symb 15 /* 0xf, linker should treat it same as 0x12 */ 73#define R_BFIN_VAR_EQ_SYMB 15 /* 0xf, linker should treat it same as 0x12 */
74#define R_byte_data 16 /* 0x10, .byte var = symbol */ 74#define R_BFIN_BYTE_DATA 16 /* 0x10, .byte var = symbol */
75#define R_byte2_data 17 /* 0x11, .byte2 var = symbol */ 75#define R_BFIN_BYTE2_DATA 17 /* 0x11, .byte2 var = symbol */
76#define R_byte4_data 18 /* 0x12, .byte4 var = symbol and .var var=symbol */ 76#define R_BFIN_BYTE4_DATA 18 /* 0x12, .byte4 var = symbol and .var var=symbol */
77#define R_pcrel11 19 /* 0x13, lsetup part b */ 77#define R_BFIN_PCREL11 19 /* 0x13, lsetup part b */
78#define R_unused14 20 /* 0x14, undefined */ 78#define R_BFIN_UNUSED14 20 /* 0x14, undefined */
79#define R_unused15 21 /* not generated by VDSP 3.5 */ 79#define R_BFIN_UNUSED15 21 /* not generated by VDSP 3.5 */
80 80
81/* arithmetic relocations */ 81/* arithmetic relocations */
82#define R_push 0xE0 82#define R_BFIN_PUSH 0xE0
83#define R_const 0xE1 83#define R_BFIN_CONST 0xE1
84#define R_add 0xE2 84#define R_BFIN_ADD 0xE2
85#define R_sub 0xE3 85#define R_BFIN_SUB 0xE3
86#define R_mult 0xE4 86#define R_BFIN_MULT 0xE4
87#define R_div 0xE5 87#define R_BFIN_DIV 0xE5
88#define R_mod 0xE6 88#define R_BFIN_MOD 0xE6
89#define R_lshift 0xE7 89#define R_BFIN_LSHIFT 0xE7
90#define R_rshift 0xE8 90#define R_BFIN_RSHIFT 0xE8
91#define R_and 0xE9 91#define R_BFIN_AND 0xE9
92#define R_or 0xEA 92#define R_BFIN_OR 0xEA
93#define R_xor 0xEB 93#define R_BFIN_XOR 0xEB
94#define R_land 0xEC 94#define R_BFIN_LAND 0xEC
95#define R_lor 0xED 95#define R_BFIN_LOR 0xED
96#define R_len 0xEE 96#define R_BFIN_LEN 0xEE
97#define R_neg 0xEF 97#define R_BFIN_NEG 0xEF
98#define R_comp 0xF0 98#define R_BFIN_COMP 0xF0
99#define R_page 0xF1 99#define R_BFIN_PAGE 0xF1
100#define R_hwpage 0xF2 100#define R_BFIN_HWPAGE 0xF2
101#define R_addr 0xF3 101#define R_BFIN_ADDR 0xF3
102 102
103/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 103/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
104 use of this is to invoke "./ld.so someprog" to test out a new version of 104 use of this is to invoke "./ld.so someprog" to test out a new version of
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index b30a2968e274..ec58efc130e6 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -35,21 +35,39 @@
35#else 35#else
36# define LOAD_IPIPE_IPEND 36# define LOAD_IPIPE_IPEND
37#endif 37#endif
38
39#ifndef CONFIG_EXACT_HWERR
40/* As a debugging aid - we save IPEND when DEBUG_KERNEL is on,
41 * otherwise it is a waste of cycles.
42 */
43# ifndef CONFIG_DEBUG_KERNEL
44#define INTERRUPT_ENTRY(N) \
45 [--sp] = SYSCFG; \
46 [--sp] = P0; /*orig_p0*/ \
47 [--sp] = R0; /*orig_r0*/ \
48 [--sp] = (R7:0,P5:0); \
49 R0 = (N); \
50 LOAD_IPIPE_IPEND \
51 jump __common_int_entry;
52# else /* CONFIG_DEBUG_KERNEL */
38#define INTERRUPT_ENTRY(N) \ 53#define INTERRUPT_ENTRY(N) \
39 [--sp] = SYSCFG; \ 54 [--sp] = SYSCFG; \
40 \
41 [--sp] = P0; /*orig_p0*/ \ 55 [--sp] = P0; /*orig_p0*/ \
42 [--sp] = R0; /*orig_r0*/ \ 56 [--sp] = R0; /*orig_r0*/ \
43 [--sp] = (R7:0,P5:0); \ 57 [--sp] = (R7:0,P5:0); \
58 p0.l = lo(IPEND); \
59 p0.h = hi(IPEND); \
60 r1 = [p0]; \
44 R0 = (N); \ 61 R0 = (N); \
45 LOAD_IPIPE_IPEND \ 62 LOAD_IPIPE_IPEND \
46 jump __common_int_entry; 63 jump __common_int_entry;
64# endif /* CONFIG_DEBUG_KERNEL */
47 65
48/* For timer interrupts, we need to save IPEND, since the user_mode 66/* For timer interrupts, we need to save IPEND, since the user_mode
49 macro accesses it to determine where to account time. */ 67 *macro accesses it to determine where to account time.
68 */
50#define TIMER_INTERRUPT_ENTRY(N) \ 69#define TIMER_INTERRUPT_ENTRY(N) \
51 [--sp] = SYSCFG; \ 70 [--sp] = SYSCFG; \
52 \
53 [--sp] = P0; /*orig_p0*/ \ 71 [--sp] = P0; /*orig_p0*/ \
54 [--sp] = R0; /*orig_r0*/ \ 72 [--sp] = R0; /*orig_r0*/ \
55 [--sp] = (R7:0,P5:0); \ 73 [--sp] = (R7:0,P5:0); \
@@ -58,6 +76,74 @@
58 r1 = [p0]; \ 76 r1 = [p0]; \
59 R0 = (N); \ 77 R0 = (N); \
60 jump __common_int_entry; 78 jump __common_int_entry;
79#else /* CONFIG_EXACT_HWERR is defined */
80
81/* if we want hardware error to be exact, we need to do a SSYNC (which forces
82 * read/writes to complete to the memory controllers), and check to see that
83 * caused a pending HW error condition. If so, we assume it was caused by user
84 * space, by setting the same interrupt that we are in (so it goes off again)
85 * and context restore, and a RTI (without servicing anything). This should
86 * cause the pending HWERR to fire, and when that is done, this interrupt will
87 * be re-serviced properly.
88 * As you can see by the code - we actually need to do two SSYNCS - one to
89 * make sure the read/writes complete, and another to make sure the hardware
90 * error is recognized by the core.
91 */
92#define INTERRUPT_ENTRY(N) \
93 SSYNC; \
94 SSYNC; \
95 [--sp] = SYSCFG; \
96 [--sp] = P0; /*orig_p0*/ \
97 [--sp] = R0; /*orig_r0*/ \
98 [--sp] = (R7:0,P5:0); \
99 R1 = ASTAT; \
100 P0.L = LO(ILAT); \
101 P0.H = HI(ILAT); \
102 R0 = [P0]; \
103 CC = BITTST(R0, EVT_IVHW_P); \
104 IF CC JUMP 1f; \
105 ASTAT = R1; \
106 p0.l = lo(IPEND); \
107 p0.h = hi(IPEND); \
108 r1 = [p0]; \
109 R0 = (N); \
110 LOAD_IPIPE_IPEND \
111 jump __common_int_entry; \
1121: ASTAT = R1; \
113 RAISE N; \
114 (R7:0, P5:0) = [SP++]; \
115 SP += 0x8; \
116 SYSCFG = [SP++]; \
117 CSYNC; \
118 RTI;
119
120#define TIMER_INTERRUPT_ENTRY(N) \
121 SSYNC; \
122 SSYNC; \
123 [--sp] = SYSCFG; \
124 [--sp] = P0; /*orig_p0*/ \
125 [--sp] = R0; /*orig_r0*/ \
126 [--sp] = (R7:0,P5:0); \
127 R1 = ASTAT; \
128 P0.L = LO(ILAT); \
129 P0.H = HI(ILAT); \
130 R0 = [P0]; \
131 CC = BITTST(R0, EVT_IVHW_P); \
132 IF CC JUMP 1f; \
133 ASTAT = R1; \
134 p0.l = lo(IPEND); \
135 p0.h = hi(IPEND); \
136 r1 = [p0]; \
137 R0 = (N); \
138 jump __common_int_entry; \
1391: ASTAT = R1; \
140 RAISE N; \
141 (R7:0, P5:0) = [SP++]; \
142 SP += 0x8; \
143 SYSCFG = [SP++]; \
144 CSYNC; \
145 RTI;
146#endif /* CONFIG_EXACT_HWERR */
61 147
62/* This one pushes RETI without using CLI. Interrupts are enabled. */ 148/* This one pushes RETI without using CLI. Interrupts are enabled. */
63#define SAVE_CONTEXT_SYSCALL save_context_syscall 149#define SAVE_CONTEXT_SYSCALL save_context_syscall
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index b0f847ae4bf4..89f08decb8e0 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -30,6 +30,7 @@
30# else 30# else
31# define MAX_BLACKFIN_GPTIMERS 11 31# define MAX_BLACKFIN_GPTIMERS 11
32# define TIMER8_GROUP_REG TIMER_ENABLE1 32# define TIMER8_GROUP_REG TIMER_ENABLE1
33# define TIMER_GROUP2 1
33# endif 34# endif
34# define TIMER0_GROUP_REG TIMER_ENABLE0 35# define TIMER0_GROUP_REG TIMER_ENABLE0
35#endif 36#endif
@@ -40,10 +41,12 @@
40# define MAX_BLACKFIN_GPTIMERS 12 41# define MAX_BLACKFIN_GPTIMERS 12
41# define TIMER0_GROUP_REG TMRS8_ENABLE 42# define TIMER0_GROUP_REG TMRS8_ENABLE
42# define TIMER8_GROUP_REG TMRS4_ENABLE 43# define TIMER8_GROUP_REG TMRS4_ENABLE
44# define TIMER_GROUP2 1
43#endif 45#endif
44/* 46/*
45 * All others: 3 timers: 47 * All others: 3 timers:
46 */ 48 */
49#define TIMER_GROUP1 0
47#if !defined(MAX_BLACKFIN_GPTIMERS) 50#if !defined(MAX_BLACKFIN_GPTIMERS)
48# define MAX_BLACKFIN_GPTIMERS 3 51# define MAX_BLACKFIN_GPTIMERS 3
49# define TIMER0_GROUP_REG TIMER_ENABLE 52# define TIMER0_GROUP_REG TIMER_ENABLE
@@ -109,8 +112,8 @@
109#define TIMER_ERR_PROG_PER 0x8000 112#define TIMER_ERR_PROG_PER 0x8000
110#define TIMER_ERR_PROG_PW 0xC000 113#define TIMER_ERR_PROG_PW 0xC000
111#define TIMER_EMU_RUN 0x0200 114#define TIMER_EMU_RUN 0x0200
112#define TIMER_TOGGLE_HI 0x0100 115#define TIMER_TOGGLE_HI 0x0100
113#define TIMER_CLK_SEL 0x0080 116#define TIMER_CLK_SEL 0x0080
114#define TIMER_OUT_DIS 0x0040 117#define TIMER_OUT_DIS 0x0040
115#define TIMER_TIN_SEL 0x0020 118#define TIMER_TIN_SEL 0x0020
116#define TIMER_IRQ_ENA 0x0010 119#define TIMER_IRQ_ENA 0x0010
@@ -169,23 +172,25 @@
169 172
170/* The actual gptimer API */ 173/* The actual gptimer API */
171 174
172void set_gptimer_pwidth (int timer_id, uint32_t width); 175void set_gptimer_pwidth(int timer_id, uint32_t width);
173uint32_t get_gptimer_pwidth (int timer_id); 176uint32_t get_gptimer_pwidth(int timer_id);
174void set_gptimer_period (int timer_id, uint32_t period); 177void set_gptimer_period(int timer_id, uint32_t period);
175uint32_t get_gptimer_period (int timer_id); 178uint32_t get_gptimer_period(int timer_id);
176uint32_t get_gptimer_count (int timer_id); 179uint32_t get_gptimer_count(int timer_id);
177uint16_t get_gptimer_intr (int timer_id); 180int get_gptimer_intr(int timer_id);
178void clear_gptimer_intr (int timer_id); 181void clear_gptimer_intr(int timer_id);
179uint16_t get_gptimer_over (int timer_id); 182int get_gptimer_over(int timer_id);
180void clear_gptimer_over (int timer_id); 183void clear_gptimer_over(int timer_id);
181void set_gptimer_config (int timer_id, uint16_t config); 184void set_gptimer_config(int timer_id, uint16_t config);
182uint16_t get_gptimer_config (int timer_id); 185uint16_t get_gptimer_config(int timer_id);
183void set_gptimer_pulse_hi (int timer_id); 186int get_gptimer_run(int timer_id);
187void set_gptimer_pulse_hi(int timer_id);
184void clear_gptimer_pulse_hi(int timer_id); 188void clear_gptimer_pulse_hi(int timer_id);
185void enable_gptimers (uint16_t mask); 189void enable_gptimers(uint16_t mask);
186void disable_gptimers (uint16_t mask); 190void disable_gptimers(uint16_t mask);
187uint16_t get_enabled_gptimers (void); 191void disable_gptimers_sync(uint16_t mask);
188uint32_t get_gptimer_status (int group); 192uint16_t get_enabled_gptimers(void);
189void set_gptimer_status (int group, uint32_t value); 193uint32_t get_gptimer_status(int group);
194void set_gptimer_status(int group, uint32_t value);
190 195
191#endif 196#endif
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 63b2d8c78570..3022b5c96b37 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -80,19 +80,22 @@ static inline unsigned int readl(const volatile void __iomem *addr)
80#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) 80#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
81#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) 81#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
82 82
83#define inb(addr) readb(addr) 83/* Convert "I/O port addresses" to actual addresses. i.e. ugly casts. */
84#define inw(addr) readw(addr) 84#define __io(port) ((void *)(unsigned long)(port))
85#define inl(addr) readl(addr) 85
86#define outb(x,addr) ((void) writeb(x,addr)) 86#define inb(port) readb(__io(port))
87#define outw(x,addr) ((void) writew(x,addr)) 87#define inw(port) readw(__io(port))
88#define outl(x,addr) ((void) writel(x,addr)) 88#define inl(port) readl(__io(port))
89 89#define outb(x,port) writeb(x,__io(port))
90#define inb_p(addr) inb(addr) 90#define outw(x,port) writew(x,__io(port))
91#define inw_p(addr) inw(addr) 91#define outl(x,port) writel(x,__io(port))
92#define inl_p(addr) inl(addr) 92
93#define outb_p(x,addr) outb(x,addr) 93#define inb_p(port) inb(__io(port))
94#define outw_p(x,addr) outw(x,addr) 94#define inw_p(port) inw(__io(port))
95#define outl_p(x,addr) outl(x,addr) 95#define inl_p(port) inl(__io(port))
96#define outb_p(x,port) outb(x,__io(port))
97#define outw_p(x,port) outw(x,__io(port))
98#define outl_p(x,port) outl(x,__io(port))
96 99
97#define ioread8_rep(a,d,c) readsb(a,d,c) 100#define ioread8_rep(a,d,c) readsb(a,d,c)
98#define ioread16_rep(a,d,c) readsw(a,d,c) 101#define ioread16_rep(a,d,c) readsw(a,d,c)
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 343b56361ec9..51d0bf5e2899 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,10 +35,10 @@
35#include <asm/atomic.h> 35#include <asm/atomic.h>
36#include <asm/traps.h> 36#include <asm/traps.h>
37 37
38#define IPIPE_ARCH_STRING "1.9-00" 38#define IPIPE_ARCH_STRING "1.9-01"
39#define IPIPE_MAJOR_NUMBER 1 39#define IPIPE_MAJOR_NUMBER 1
40#define IPIPE_MINOR_NUMBER 9 40#define IPIPE_MINOR_NUMBER 9
41#define IPIPE_PATCH_NUMBER 0 41#define IPIPE_PATCH_NUMBER 1
42 42
43#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
44#error "I-pipe/blackfin: SMP not implemented" 44#error "I-pipe/blackfin: SMP not implemented"
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index a67142740df0..b42555c1431c 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -64,8 +64,6 @@ struct blackfin_pda { /* Per-processor Data Area */
64 64
65extern struct blackfin_pda cpu_pda[]; 65extern struct blackfin_pda cpu_pda[];
66 66
67void reserve_pda(void);
68
69#endif /* __ASSEMBLY__ */ 67#endif /* __ASSEMBLY__ */
70 68
71#endif /* _ASM_BLACKFIN_PDA_H */ 69#endif /* _ASM_BLACKFIN_PDA_H */
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 0eece23b41c7..3040415523b2 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -131,8 +131,8 @@ unsigned long get_wchan(struct task_struct *p);
131/* Get the Silicon Revision of the chip */ 131/* Get the Silicon Revision of the chip */
132static inline uint32_t __pure bfin_revid(void) 132static inline uint32_t __pure bfin_revid(void)
133{ 133{
134 /* stored in the upper 4 bits */ 134 /* Always use CHIPID, to work around ANOMALY_05000234 */
135 uint32_t revid = bfin_read_CHIPID() >> 28; 135 uint32_t revid = (bfin_read_CHIPID() & CHIPID_VERSION) >> 28;
136 136
137#ifdef CONFIG_BF52x 137#ifdef CONFIG_BF52x
138 /* ANOMALY_05000357 138 /* ANOMALY_05000357
diff --git a/arch/blackfin/include/asm/time.h b/arch/blackfin/include/asm/time.h
index ddc43ce38533..589e937ed1eb 100644
--- a/arch/blackfin/include/asm/time.h
+++ b/arch/blackfin/include/asm/time.h
@@ -37,4 +37,5 @@ extern unsigned long long __bfin_cycles_off;
37extern unsigned int __bfin_cycles_mod; 37extern unsigned int __bfin_cycles_mod;
38#endif 38#endif
39 39
40extern void __init setup_core_timer(void);
40#endif 41#endif
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 3248033531e6..8894e9ffbb57 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -59,12 +59,8 @@ static inline int is_in_rom(unsigned long addr)
59#ifndef CONFIG_ACCESS_CHECK 59#ifndef CONFIG_ACCESS_CHECK
60static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; } 60static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; }
61#else 61#else
62#ifdef CONFIG_ACCESS_OK_L1
63extern int _access_ok(unsigned long addr, unsigned long size)__attribute__((l1_text));
64#else
65extern int _access_ok(unsigned long addr, unsigned long size); 62extern int _access_ok(unsigned long addr, unsigned long size);
66#endif 63#endif
67#endif
68 64
69/* 65/*
70 * The exception table consists of pairs of addresses: the first is the 66 * The exception table consists of pairs of addresses: the first is the
@@ -83,9 +79,6 @@ struct exception_table_entry {
83 unsigned long insn, fixup; 79 unsigned long insn, fixup;
84}; 80};
85 81
86/* Returns 0 if exception not found and fixup otherwise. */
87extern unsigned long search_exception_table(unsigned long);
88
89/* 82/*
90 * These are the main single-value transfer routines. They automatically 83 * These are the main single-value transfer routines. They automatically
91 * use the right size if we just have the right pointer type. 84 * use the right size if we just have the right pointer type.
@@ -233,16 +226,29 @@ strncpy_from_user(char *dst, const char *src, long count)
233} 226}
234 227
235/* 228/*
236 * Return the size of a string (including the ending 0) 229 * Get the size of a string in user space.
230 * src: The string to measure
231 * n: The maximum valid length
237 * 232 *
238 * Return 0 on exception, a value greater than N if too long 233 * Get the size of a NUL-terminated string in user space.
234 *
235 * Returns the size of the string INCLUDING the terminating NUL.
236 * On exception, returns 0.
237 * If the string is too long, returns a value greater than n.
239 */ 238 */
240static inline long strnlen_user(const char *src, long n) 239static inline long __must_check strnlen_user(const char *src, long n)
241{ 240{
242 return (strlen(src) + 1); 241 if (!access_ok(VERIFY_READ, src, 1))
242 return 0;
243 return strnlen(src, n) + 1;
243} 244}
244 245
245#define strlen_user(str) strnlen_user(str, 32767) 246static inline long __must_check strlen_user(const char *src)
247{
248 if (!access_ok(VERIFY_READ, src, 1))
249 return 0;
250 return strlen(src) + 1;
251}
246 252
247/* 253/*
248 * Zero Userspace 254 * Zero Userspace
@@ -251,6 +257,8 @@ static inline long strnlen_user(const char *src, long n)
251static inline unsigned long __must_check 257static inline unsigned long __must_check
252__clear_user(void *to, unsigned long n) 258__clear_user(void *to, unsigned long n)
253{ 259{
260 if (!access_ok(VERIFY_WRITE, to, n))
261 return n;
254 memset(to, 0, n); 262 memset(to, 0, n);
255 return 0; 263 return 0;
256} 264}
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8531693fb48d..763ed84ba459 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -20,6 +20,11 @@
20#include <asm/dma.h> 20#include <asm/dma.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23/*
24 * To make sure we work around 05000119 - we always check DMA_DONE bit,
25 * never the DMA_RUN bit
26 */
27
23struct dma_channel dma_ch[MAX_DMA_CHANNELS]; 28struct dma_channel dma_ch[MAX_DMA_CHANNELS];
24EXPORT_SYMBOL(dma_ch); 29EXPORT_SYMBOL(dma_ch);
25 30
@@ -232,6 +237,87 @@ void blackfin_dma_resume(void)
232void __init blackfin_dma_early_init(void) 237void __init blackfin_dma_early_init(void)
233{ 238{
234 bfin_write_MDMA_S0_CONFIG(0); 239 bfin_write_MDMA_S0_CONFIG(0);
240 bfin_write_MDMA_S1_CONFIG(0);
241}
242
243void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
244{
245 unsigned long dst = (unsigned long)pdst;
246 unsigned long src = (unsigned long)psrc;
247 struct dma_register *dst_ch, *src_ch;
248
249 /* We assume that everything is 4 byte aligned, so include
250 * a basic sanity check
251 */
252 BUG_ON(dst % 4);
253 BUG_ON(src % 4);
254 BUG_ON(size % 4);
255
256 /* Force a sync in case a previous config reset on this channel
257 * occurred. This is needed so subsequent writes to DMA registers
258 * are not spuriously lost/corrupted.
259 */
260 __builtin_bfin_ssync();
261
262 src_ch = 0;
263 /* Find an avalible memDMA channel */
264 while (1) {
265 if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) {
266 dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
267 src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
268 } else {
269 dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
270 src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
271 }
272
273 if (!bfin_read16(&src_ch->cfg)) {
274 break;
275 } else {
276 if (bfin_read16(&src_ch->irq_status) & DMA_DONE)
277 bfin_write16(&src_ch->cfg, 0);
278 }
279
280 }
281
282 /* Destination */
283 bfin_write32(&dst_ch->start_addr, dst);
284 bfin_write16(&dst_ch->x_count, size >> 2);
285 bfin_write16(&dst_ch->x_modify, 1 << 2);
286 bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
287
288 /* Source */
289 bfin_write32(&src_ch->start_addr, src);
290 bfin_write16(&src_ch->x_count, size >> 2);
291 bfin_write16(&src_ch->x_modify, 1 << 2);
292 bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
293
294 /* Enable */
295 bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
296 bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
297
298 /* Since we are atomic now, don't use the workaround ssync */
299 __builtin_bfin_ssync();
300}
301
302void __init early_dma_memcpy_done(void)
303{
304 while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
305 (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
306 continue;
307
308 bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
309 bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
310 /*
311 * Now that DMA is done, we would normally flush cache, but
312 * i/d cache isn't running this early, so we don't bother,
313 * and just clear out the DMA channel for next time
314 */
315 bfin_write_MDMA_S0_CONFIG(0);
316 bfin_write_MDMA_S1_CONFIG(0);
317 bfin_write_MDMA_D0_CONFIG(0);
318 bfin_write_MDMA_D1_CONFIG(0);
319
320 __builtin_bfin_ssync();
235} 321}
236 322
237/** 323/**
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index a0678da40532..beffa00a93c3 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -313,15 +313,6 @@ inline void portmux_setup(unsigned short per)
313# define portmux_setup(...) do { } while (0) 313# define portmux_setup(...) do { } while (0)
314#endif 314#endif
315 315
316static int __init bfin_gpio_init(void)
317{
318 printk(KERN_INFO "Blackfin GPIO Controller\n");
319
320 return 0;
321}
322arch_initcall(bfin_gpio_init);
323
324
325#ifndef CONFIG_BF54x 316#ifndef CONFIG_BF54x
326/*********************************************************** 317/***********************************************************
327* 318*
@@ -1021,15 +1012,6 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
1021 1012
1022 local_irq_save_hw(flags); 1013 local_irq_save_hw(flags);
1023 1014
1024 if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
1025 if (system_state == SYSTEM_BOOTING)
1026 dump_stack();
1027 printk(KERN_ERR
1028 "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
1029 gpio);
1030 local_irq_restore_hw(flags);
1031 return -EBUSY;
1032 }
1033 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) { 1015 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
1034 if (system_state == SYSTEM_BOOTING) 1016 if (system_state == SYSTEM_BOOTING)
1035 dump_stack(); 1017 dump_stack();
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 01f917d58b59..53e893ff708a 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -16,7 +16,6 @@ EXPORT_SYMBOL(bfin_return_from_exception);
16 16
17/* All the Blackfin cache functions: mach-common/cache.S */ 17/* All the Blackfin cache functions: mach-common/cache.S */
18EXPORT_SYMBOL(blackfin_dcache_invalidate_range); 18EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
19EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
20EXPORT_SYMBOL(blackfin_icache_flush_range); 19EXPORT_SYMBOL(blackfin_icache_flush_range);
21EXPORT_SYMBOL(blackfin_dcache_flush_range); 20EXPORT_SYMBOL(blackfin_dcache_flush_range);
22EXPORT_SYMBOL(blackfin_dflush_page); 21EXPORT_SYMBOL(blackfin_dflush_page);
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
index c6ff947f9d37..d5a86c3017f7 100644
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
@@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
55 } 55 }
56 56
57 ctrl = bfin_read_DMEM_CONTROL(); 57 ctrl = bfin_read_DMEM_CONTROL();
58 ctrl |= DMEM_CNTR; 58
59 /*
60 * Anomaly notes:
61 * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
62 * register, so that the port preferences for DAG0 and DAG1 are set
63 * to port B
64 */
65 ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
59 bfin_write_DMEM_CONTROL(ctrl); 66 bfin_write_DMEM_CONTROL(ctrl);
60 SSYNC(); 67 SSYNC();
61} 68}
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 3e329a6ce041..c006a44527bf 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -64,7 +64,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
64 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; 64 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
65 65
66 icplb_tbl[cpu][i_i].addr = 0; 66 icplb_tbl[cpu][i_i].addr = 0;
67 icplb_tbl[cpu][i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_1KB; 67 icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB;
68 68
69 /* Cover kernel memory with 4M pages. */ 69 /* Cover kernel memory with 4M pages. */
70 addr = 0; 70 addr = 0;
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index c6ff947f9d37..d5a86c3017f7 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
55 } 55 }
56 56
57 ctrl = bfin_read_DMEM_CONTROL(); 57 ctrl = bfin_read_DMEM_CONTROL();
58 ctrl |= DMEM_CNTR; 58
59 /*
60 * Anomaly notes:
61 * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
62 * register, so that the port preferences for DAG0 and DAG1 are set
63 * to port B
64 */
65 ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
59 bfin_write_DMEM_CONTROL(ctrl); 66 bfin_write_DMEM_CONTROL(ctrl);
60 SSYNC(); 67 SSYNC();
61} 68}
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index c8ad051742e2..3302719173ca 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -178,25 +178,15 @@ int __init setup_early_printk(char *buf)
178 178
179asmlinkage void __init init_early_exception_vectors(void) 179asmlinkage void __init init_early_exception_vectors(void)
180{ 180{
181 u32 evt;
181 SSYNC(); 182 SSYNC();
182 183
183 /* cannot program in software: 184 /* cannot program in software:
184 * evt0 - emulation (jtag) 185 * evt0 - emulation (jtag)
185 * evt1 - reset 186 * evt1 - reset
186 */ 187 */
187 bfin_write_EVT2(early_trap); 188 for (evt = EVT2; evt <= EVT15; evt += 4)
188 bfin_write_EVT3(early_trap); 189 bfin_write32(evt, early_trap);
189 bfin_write_EVT5(early_trap);
190 bfin_write_EVT6(early_trap);
191 bfin_write_EVT7(early_trap);
192 bfin_write_EVT8(early_trap);
193 bfin_write_EVT9(early_trap);
194 bfin_write_EVT10(early_trap);
195 bfin_write_EVT11(early_trap);
196 bfin_write_EVT12(early_trap);
197 bfin_write_EVT13(early_trap);
198 bfin_write_EVT14(early_trap);
199 bfin_write_EVT15(early_trap);
200 CSYNC(); 190 CSYNC();
201 191
202 /* Set all the return from interrupt, exception, NMI to a known place 192 /* Set all the return from interrupt, exception, NMI to a known place
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 3a3e9615b002..7281a91d26b5 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -189,10 +189,10 @@ void set_gptimer_status(int group, uint32_t value)
189} 189}
190EXPORT_SYMBOL(set_gptimer_status); 190EXPORT_SYMBOL(set_gptimer_status);
191 191
192uint16_t get_gptimer_intr(int timer_id) 192int get_gptimer_intr(int timer_id)
193{ 193{
194 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 194 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
195 return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]) ? 1 : 0; 195 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
196} 196}
197EXPORT_SYMBOL(get_gptimer_intr); 197EXPORT_SYMBOL(get_gptimer_intr);
198 198
@@ -203,10 +203,10 @@ void clear_gptimer_intr(int timer_id)
203} 203}
204EXPORT_SYMBOL(clear_gptimer_intr); 204EXPORT_SYMBOL(clear_gptimer_intr);
205 205
206uint16_t get_gptimer_over(int timer_id) 206int get_gptimer_over(int timer_id)
207{ 207{
208 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 208 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
209 return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]) ? 1 : 0; 209 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
210} 210}
211EXPORT_SYMBOL(get_gptimer_over); 211EXPORT_SYMBOL(get_gptimer_over);
212 212
@@ -217,6 +217,13 @@ void clear_gptimer_over(int timer_id)
217} 217}
218EXPORT_SYMBOL(clear_gptimer_over); 218EXPORT_SYMBOL(clear_gptimer_over);
219 219
220int get_gptimer_run(int timer_id)
221{
222 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
223 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
224}
225EXPORT_SYMBOL(get_gptimer_run);
226
220void set_gptimer_config(int timer_id, uint16_t config) 227void set_gptimer_config(int timer_id, uint16_t config)
221{ 228{
222 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 229 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
@@ -244,7 +251,7 @@ void enable_gptimers(uint16_t mask)
244} 251}
245EXPORT_SYMBOL(enable_gptimers); 252EXPORT_SYMBOL(enable_gptimers);
246 253
247void disable_gptimers(uint16_t mask) 254static void _disable_gptimers(uint16_t mask)
248{ 255{
249 int i; 256 int i;
250 uint16_t m = mask; 257 uint16_t m = mask;
@@ -253,6 +260,12 @@ void disable_gptimers(uint16_t mask)
253 group_regs[i]->disable = m & 0xFF; 260 group_regs[i]->disable = m & 0xFF;
254 m >>= 8; 261 m >>= 8;
255 } 262 }
263}
264
265void disable_gptimers(uint16_t mask)
266{
267 int i;
268 _disable_gptimers(mask);
256 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) 269 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
257 if (mask & (1 << i)) 270 if (mask & (1 << i))
258 group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; 271 group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
@@ -260,6 +273,13 @@ void disable_gptimers(uint16_t mask)
260} 273}
261EXPORT_SYMBOL(disable_gptimers); 274EXPORT_SYMBOL(disable_gptimers);
262 275
276void disable_gptimers_sync(uint16_t mask)
277{
278 _disable_gptimers(mask);
279 SSYNC();
280}
281EXPORT_SYMBOL(disable_gptimers_sync);
282
263void set_gptimer_pulse_hi(int timer_id) 283void set_gptimer_pulse_hi(int timer_id)
264{ 284{
265 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 285 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index a5de8d45424c..5fc424803a17 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -167,7 +167,7 @@ int __ipipe_check_root(void)
167void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq) 167void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
168{ 168{
169 struct irq_desc *desc = irq_to_desc(irq); 169 struct irq_desc *desc = irq_to_desc(irq);
170 int prio = desc->ic_prio; 170 int prio = __ipipe_get_irq_priority(irq);
171 171
172 desc->depth = 0; 172 desc->depth = 0;
173 if (ipd != &ipipe_root && 173 if (ipd != &ipipe_root &&
@@ -178,8 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
178 178
179void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq) 179void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
180{ 180{
181 struct irq_desc *desc = irq_to_desc(irq); 181 int prio = __ipipe_get_irq_priority(irq);
182 int prio = desc->ic_prio;
183 182
184 if (ipd != &ipipe_root && 183 if (ipd != &ipipe_root &&
185 atomic_dec_and_test(&__ipipe_irq_lvdepth[prio])) 184 atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
@@ -310,12 +309,16 @@ int ipipe_trigger_irq(unsigned irq)
310 309
311asmlinkage void __ipipe_sync_root(void) 310asmlinkage void __ipipe_sync_root(void)
312{ 311{
312 void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
313 unsigned long flags; 313 unsigned long flags;
314 314
315 BUG_ON(irqs_disabled()); 315 BUG_ON(irqs_disabled());
316 316
317 local_irq_save_hw(flags); 317 local_irq_save_hw(flags);
318 318
319 if (irq_tail_hook)
320 irq_tail_hook();
321
319 clear_thread_flag(TIF_IRQ_SYNC); 322 clear_thread_flag(TIF_IRQ_SYNC);
320 323
321 if (ipipe_root_cpudom_var(irqpend_himask) != 0) 324 if (ipipe_root_cpudom_var(irqpend_himask) != 0)
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 401bd32aa499..6e31e935bb31 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -59,12 +59,14 @@ static struct irq_chip bad_chip = {
59 .unmask = dummy_mask_unmask_irq, 59 .unmask = dummy_mask_unmask_irq,
60}; 60};
61 61
62static int bad_stats;
62static struct irq_desc bad_irq_desc = { 63static struct irq_desc bad_irq_desc = {
63 .status = IRQ_DISABLED, 64 .status = IRQ_DISABLED,
64 .chip = &bad_chip, 65 .chip = &bad_chip,
65 .handle_irq = handle_bad_irq, 66 .handle_irq = handle_bad_irq,
66 .depth = 1, 67 .depth = 1,
67 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 68 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
69 .kstat_irqs = &bad_stats,
68#ifdef CONFIG_SMP 70#ifdef CONFIG_SMP
69 .affinity = CPU_MASK_ALL 71 .affinity = CPU_MASK_ALL
70#endif 72#endif
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index b163f6d3330d..da28f796ad78 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -466,7 +466,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
466 int cpu = raw_smp_processor_id(); 466 int cpu = raw_smp_processor_id();
467 467
468 if (size < 0) 468 if (size < 0)
469 return EFAULT; 469 return -EFAULT;
470 if (addr >= 0x1000 && (addr + size) <= physical_mem_end) 470 if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
471 return 0; 471 return 0;
472 if (addr >= SYSMMR_BASE) 472 if (addr >= SYSMMR_BASE)
@@ -498,7 +498,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
498 if (IN_MEM(addr, size, L2_START, L2_LENGTH)) 498 if (IN_MEM(addr, size, L2_START, L2_LENGTH))
499 return 0; 499 return 0;
500 500
501 return EFAULT; 501 return -EFAULT;
502} 502}
503 503
504/* 504/*
@@ -508,14 +508,15 @@ static int validate_memory_access_address(unsigned long addr, int size)
508int kgdb_mem2hex(char *mem, char *buf, int count) 508int kgdb_mem2hex(char *mem, char *buf, int count)
509{ 509{
510 char *tmp; 510 char *tmp;
511 int err = 0; 511 int err;
512 unsigned char *pch; 512 unsigned char *pch;
513 unsigned short mmr16; 513 unsigned short mmr16;
514 unsigned long mmr32; 514 unsigned long mmr32;
515 int cpu = raw_smp_processor_id(); 515 int cpu = raw_smp_processor_id();
516 516
517 if (validate_memory_access_address((unsigned long)mem, count)) 517 err = validate_memory_access_address((unsigned long)mem, count);
518 return EFAULT; 518 if (err)
519 return err;
519 520
520 /* 521 /*
521 * We use the upper half of buf as an intermediate buffer for the 522 * We use the upper half of buf as an intermediate buffer for the
@@ -533,7 +534,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
533 *tmp++ = *pch++; 534 *tmp++ = *pch++;
534 tmp -= 2; 535 tmp -= 2;
535 } else 536 } else
536 err = EFAULT; 537 err = -EFAULT;
537 break; 538 break;
538 case 4: 539 case 4:
539 if ((unsigned int)mem % 4 == 0) { 540 if ((unsigned int)mem % 4 == 0) {
@@ -545,10 +546,10 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
545 *tmp++ = *pch++; 546 *tmp++ = *pch++;
546 tmp -= 4; 547 tmp -= 4;
547 } else 548 } else
548 err = EFAULT; 549 err = -EFAULT;
549 break; 550 break;
550 default: 551 default:
551 err = EFAULT; 552 err = -EFAULT;
552 } 553 }
553 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) 554 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
554#ifdef CONFIG_SMP 555#ifdef CONFIG_SMP
@@ -557,7 +558,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
557 ) { 558 ) {
558 /* access L1 instruction SRAM*/ 559 /* access L1 instruction SRAM*/
559 if (dma_memcpy(tmp, mem, count) == NULL) 560 if (dma_memcpy(tmp, mem, count) == NULL)
560 err = EFAULT; 561 err = -EFAULT;
561 } else 562 } else
562 err = probe_kernel_read(tmp, mem, count); 563 err = probe_kernel_read(tmp, mem, count);
563 564
@@ -585,24 +586,24 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
585 char *tmp_new; 586 char *tmp_new;
586 unsigned short *mmr16; 587 unsigned short *mmr16;
587 unsigned long *mmr32; 588 unsigned long *mmr32;
588 int err = 0; 589 int err;
589 int size = 0; 590 int size;
590 int cpu = raw_smp_processor_id(); 591 int cpu = raw_smp_processor_id();
591 592
592 tmp_old = tmp_new = buf; 593 tmp_old = tmp_new = buf;
593 594
594 while (count-- > 0) { 595 for (size = 0; size < count; ++size) {
595 if (*tmp_old == 0x7d) 596 if (*tmp_old == 0x7d)
596 *tmp_new = *(++tmp_old) ^ 0x20; 597 *tmp_new = *(++tmp_old) ^ 0x20;
597 else 598 else
598 *tmp_new = *tmp_old; 599 *tmp_new = *tmp_old;
599 tmp_new++; 600 tmp_new++;
600 tmp_old++; 601 tmp_old++;
601 size++;
602 } 602 }
603 603
604 if (validate_memory_access_address((unsigned long)mem, size)) 604 err = validate_memory_access_address((unsigned long)mem, size);
605 return EFAULT; 605 if (err)
606 return err;
606 607
607 if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/ 608 if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
608 switch (size) { 609 switch (size) {
@@ -611,17 +612,17 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
611 mmr16 = (unsigned short *)buf; 612 mmr16 = (unsigned short *)buf;
612 *(unsigned short *)mem = *mmr16; 613 *(unsigned short *)mem = *mmr16;
613 } else 614 } else
614 return EFAULT; 615 err = -EFAULT;
615 break; 616 break;
616 case 4: 617 case 4:
617 if ((unsigned int)mem % 4 == 0) { 618 if ((unsigned int)mem % 4 == 0) {
618 mmr32 = (unsigned long *)buf; 619 mmr32 = (unsigned long *)buf;
619 *(unsigned long *)mem = *mmr32; 620 *(unsigned long *)mem = *mmr32;
620 } else 621 } else
621 return EFAULT; 622 err = -EFAULT;
622 break; 623 break;
623 default: 624 default:
624 return EFAULT; 625 err = -EFAULT;
625 } 626 }
626 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) 627 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
627#ifdef CONFIG_SMP 628#ifdef CONFIG_SMP
@@ -630,7 +631,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
630 ) { 631 ) {
631 /* access L1 instruction SRAM */ 632 /* access L1 instruction SRAM */
632 if (dma_memcpy(mem, buf, size) == NULL) 633 if (dma_memcpy(mem, buf, size) == NULL)
633 err = EFAULT; 634 err = -EFAULT;
634 } else 635 } else
635 err = probe_kernel_write(mem, buf, size); 636 err = probe_kernel_write(mem, buf, size);
636 637
@@ -648,10 +649,12 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
648 char *tmp_hex; 649 char *tmp_hex;
649 unsigned short *mmr16; 650 unsigned short *mmr16;
650 unsigned long *mmr32; 651 unsigned long *mmr32;
652 int err;
651 int cpu = raw_smp_processor_id(); 653 int cpu = raw_smp_processor_id();
652 654
653 if (validate_memory_access_address((unsigned long)mem, count)) 655 err = validate_memory_access_address((unsigned long)mem, count);
654 return EFAULT; 656 if (err)
657 return err;
655 658
656 /* 659 /*
657 * We use the upper half of buf as an intermediate buffer for the 660 * We use the upper half of buf as an intermediate buffer for the
@@ -673,17 +676,17 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
673 mmr16 = (unsigned short *)tmp_raw; 676 mmr16 = (unsigned short *)tmp_raw;
674 *(unsigned short *)mem = *mmr16; 677 *(unsigned short *)mem = *mmr16;
675 } else 678 } else
676 return EFAULT; 679 err = -EFAULT;
677 break; 680 break;
678 case 4: 681 case 4:
679 if ((unsigned int)mem % 4 == 0) { 682 if ((unsigned int)mem % 4 == 0) {
680 mmr32 = (unsigned long *)tmp_raw; 683 mmr32 = (unsigned long *)tmp_raw;
681 *(unsigned long *)mem = *mmr32; 684 *(unsigned long *)mem = *mmr32;
682 } else 685 } else
683 return EFAULT; 686 err = -EFAULT;
684 break; 687 break;
685 default: 688 default:
686 return EFAULT; 689 err = -EFAULT;
687 } 690 }
688 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH)) 691 } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
689#ifdef CONFIG_SMP 692#ifdef CONFIG_SMP
@@ -692,10 +695,11 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
692 ) { 695 ) {
693 /* access L1 instruction SRAM */ 696 /* access L1 instruction SRAM */
694 if (dma_memcpy(mem, tmp_raw, count) == NULL) 697 if (dma_memcpy(mem, tmp_raw, count) == NULL)
695 return EFAULT; 698 err = -EFAULT;
696 } else 699 } else
697 return probe_kernel_write(mem, tmp_raw, count); 700 err = probe_kernel_write(mem, tmp_raw, count);
698 return 0; 701
702 return err;
699} 703}
700 704
701int kgdb_validate_break_address(unsigned long addr) 705int kgdb_validate_break_address(unsigned long addr)
@@ -715,7 +719,7 @@ int kgdb_validate_break_address(unsigned long addr)
715 if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH)) 719 if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH))
716 return 0; 720 return 0;
717 721
718 return EFAULT; 722 return -EFAULT;
719} 723}
720 724
721int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) 725int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index 1bd7f2d018a8..d5aee3626688 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -201,8 +201,8 @@ apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
201/* Arithmetic relocations are handled. */ 201/* Arithmetic relocations are handled. */
202/* We do not expect LSETUP to be split and hence is not */ 202/* We do not expect LSETUP to be split and hence is not */
203/* handled. */ 203/* handled. */
204/* R_byte and R_byte2 are also not handled as the gas */ 204/* R_BFIN_BYTE and R_BFIN_BYTE2 are also not handled as the */
205/* does not generate it. */ 205/* gas does not generate it. */
206/*************************************************************************/ 206/*************************************************************************/
207int 207int
208apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, 208apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
@@ -243,8 +243,8 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
243#endif 243#endif
244 switch (ELF32_R_TYPE(rel[i].r_info)) { 244 switch (ELF32_R_TYPE(rel[i].r_info)) {
245 245
246 case R_pcrel24: 246 case R_BFIN_PCREL24:
247 case R_pcrel24_jump_l: 247 case R_BFIN_PCREL24_JUMP_L:
248 /* Add the value, subtract its postition */ 248 /* Add the value, subtract its postition */
249 location16 = 249 location16 =
250 (uint16_t *) (sechdrs[sechdrs[relsec].sh_info]. 250 (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].
@@ -266,18 +266,18 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
266 (*location16 & 0xff00) | (value >> 16 & 0x00ff); 266 (*location16 & 0xff00) | (value >> 16 & 0x00ff);
267 *(location16 + 1) = value & 0xffff; 267 *(location16 + 1) = value & 0xffff;
268 break; 268 break;
269 case R_pcrel12_jump: 269 case R_BFIN_PCREL12_JUMP:
270 case R_pcrel12_jump_s: 270 case R_BFIN_PCREL12_JUMP_S:
271 value -= (uint32_t) location32; 271 value -= (uint32_t) location32;
272 value >>= 1; 272 value >>= 1;
273 *location16 = (value & 0xfff); 273 *location16 = (value & 0xfff);
274 break; 274 break;
275 case R_pcrel10: 275 case R_BFIN_PCREL10:
276 value -= (uint32_t) location32; 276 value -= (uint32_t) location32;
277 value >>= 1; 277 value >>= 1;
278 *location16 = (value & 0x3ff); 278 *location16 = (value & 0x3ff);
279 break; 279 break;
280 case R_luimm16: 280 case R_BFIN_LUIMM16:
281 pr_debug("before %x after %x\n", *location16, 281 pr_debug("before %x after %x\n", *location16,
282 (value & 0xffff)); 282 (value & 0xffff));
283 tmp = (value & 0xffff); 283 tmp = (value & 0xffff);
@@ -286,7 +286,7 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
286 } else 286 } else
287 *location16 = tmp; 287 *location16 = tmp;
288 break; 288 break;
289 case R_huimm16: 289 case R_BFIN_HUIMM16:
290 pr_debug("before %x after %x\n", *location16, 290 pr_debug("before %x after %x\n", *location16,
291 ((value >> 16) & 0xffff)); 291 ((value >> 16) & 0xffff));
292 tmp = ((value >> 16) & 0xffff); 292 tmp = ((value >> 16) & 0xffff);
@@ -295,10 +295,10 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
295 } else 295 } else
296 *location16 = tmp; 296 *location16 = tmp;
297 break; 297 break;
298 case R_rimm16: 298 case R_BFIN_RIMM16:
299 *location16 = (value & 0xffff); 299 *location16 = (value & 0xffff);
300 break; 300 break;
301 case R_byte4_data: 301 case R_BFIN_BYTE4_DATA:
302 pr_debug("before %x after %x\n", *location32, value); 302 pr_debug("before %x after %x\n", *location32, value);
303 *location32 = value; 303 *location32 = value;
304 break; 304 break;
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index e040e03335ea..30d0843ed701 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -322,6 +322,9 @@ void finish_atomic_sections (struct pt_regs *regs)
322} 322}
323 323
324#if defined(CONFIG_ACCESS_CHECK) 324#if defined(CONFIG_ACCESS_CHECK)
325#ifdef CONFIG_ACCESS_OK_L1
326__attribute__((l1_text))
327#endif
325/* Return 1 if access to memory range is OK, 0 otherwise */ 328/* Return 1 if access to memory range is OK, 0 otherwise */
326int _access_ok(unsigned long addr, unsigned long size) 329int _access_ok(unsigned long addr, unsigned long size)
327{ 330{
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index a58687bdee6a..80447f99c2b5 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -18,9 +18,12 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/pfn.h> 19#include <linux/pfn.h>
20 20
21#ifdef CONFIG_MTD_UCLINUX
22#include <linux/mtd/map.h>
21#include <linux/ext2_fs.h> 23#include <linux/ext2_fs.h>
22#include <linux/cramfs_fs.h> 24#include <linux/cramfs_fs.h>
23#include <linux/romfs_fs.h> 25#include <linux/romfs_fs.h>
26#endif
24 27
25#include <asm/cplb.h> 28#include <asm/cplb.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
@@ -45,6 +48,7 @@ EXPORT_SYMBOL(_ramend);
45EXPORT_SYMBOL(reserved_mem_dcache_on); 48EXPORT_SYMBOL(reserved_mem_dcache_on);
46 49
47#ifdef CONFIG_MTD_UCLINUX 50#ifdef CONFIG_MTD_UCLINUX
51extern struct map_info uclinux_ram_map;
48unsigned long memory_mtd_end, memory_mtd_start, mtd_size; 52unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
49unsigned long _ebss; 53unsigned long _ebss;
50EXPORT_SYMBOL(memory_mtd_end); 54EXPORT_SYMBOL(memory_mtd_end);
@@ -150,40 +154,45 @@ void __init bfin_relocate_l1_mem(void)
150 unsigned long l1_data_b_length; 154 unsigned long l1_data_b_length;
151 unsigned long l2_length; 155 unsigned long l2_length;
152 156
157 /*
158 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
159 * we know that everything about l1 text/data is nice and aligned,
160 * so copy by 4 byte chunks, and don't worry about overlapping
161 * src/dest.
162 *
163 * We can't use the dma_memcpy functions, since they can call
164 * scheduler functions which might be in L1 :( and core writes
165 * into L1 instruction cause bad access errors, so we are stuck,
166 * we are required to use DMA, but can't use the common dma
167 * functions. We can't use memcpy either - since that might be
168 * going to be in the relocated L1
169 */
170
153 blackfin_dma_early_init(); 171 blackfin_dma_early_init();
154 172
173 /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
155 l1_code_length = _etext_l1 - _stext_l1; 174 l1_code_length = _etext_l1 - _stext_l1;
156 if (l1_code_length > L1_CODE_LENGTH) 175 if (l1_code_length)
157 panic("L1 Instruction SRAM Overflow\n"); 176 early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
158 /* cannot complain as printk is not available as yet.
159 * But we can continue booting and complain later!
160 */
161
162 /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
163 dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
164 177
178 /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
165 l1_data_a_length = _sbss_l1 - _sdata_l1; 179 l1_data_a_length = _sbss_l1 - _sdata_l1;
166 if (l1_data_a_length > L1_DATA_A_LENGTH) 180 if (l1_data_a_length)
167 panic("L1 Data SRAM Bank A Overflow\n"); 181 early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
168
169 /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
170 dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
171 182
183 /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
172 l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; 184 l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
173 if (l1_data_b_length > L1_DATA_B_LENGTH) 185 if (l1_data_b_length)
174 panic("L1 Data SRAM Bank B Overflow\n"); 186 early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
175
176 /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
177 dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
178 l1_data_a_length, l1_data_b_length); 187 l1_data_a_length, l1_data_b_length);
179 188
189 early_dma_memcpy_done();
190
191 /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
180 if (L2_LENGTH != 0) { 192 if (L2_LENGTH != 0) {
181 l2_length = _sbss_l2 - _stext_l2; 193 l2_length = _sbss_l2 - _stext_l2;
182 if (l2_length > L2_LENGTH) 194 if (l2_length)
183 panic("L2 SRAM Overflow\n"); 195 memcpy(_stext_l2, _l2_lma_start, l2_length);
184
185 /* Copy _stext_l2 to _edata_l2 to L2 SRAM */
186 dma_memcpy(_stext_l2, _l2_lma_start, l2_length);
187 } 196 }
188} 197}
189 198
@@ -472,7 +481,7 @@ static __init void memory_setup(void)
472 481
473 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 482 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
474 console_init(); 483 console_init();
475 panic("DMA region exceeds memory limit: %lu.\n", 484 panic("DMA region exceeds memory limit: %lu.",
476 _ramend - _ramstart); 485 _ramend - _ramstart);
477 } 486 }
478 memory_end = _ramend - DMA_UNCACHED_REGION; 487 memory_end = _ramend - DMA_UNCACHED_REGION;
@@ -526,14 +535,13 @@ static __init void memory_setup(void)
526 535
527 if (mtd_size == 0) { 536 if (mtd_size == 0) {
528 console_init(); 537 console_init();
529 panic("Don't boot kernel without rootfs attached.\n"); 538 panic("Don't boot kernel without rootfs attached.");
530 } 539 }
531 540
532 /* Relocate MTD image to the top of memory after the uncached memory area */ 541 /* Relocate MTD image to the top of memory after the uncached memory area */
533 dma_memcpy((char *)memory_end, _end, mtd_size); 542 uclinux_ram_map.phys = memory_mtd_start = memory_end;
534 543 uclinux_ram_map.size = mtd_size;
535 memory_mtd_start = memory_end; 544 dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
536 _ebss = memory_mtd_start; /* define _ebss for compatible */
537#endif /* CONFIG_MTD_UCLINUX */ 545#endif /* CONFIG_MTD_UCLINUX */
538 546
539#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263) 547#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
@@ -796,10 +804,8 @@ void __init setup_arch(char **cmdline_p)
796 cclk = get_cclk(); 804 cclk = get_cclk();
797 sclk = get_sclk(); 805 sclk = get_sclk();
798 806
799#if !defined(CONFIG_BFIN_KERNEL_CLOCK) 807 if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
800 if (ANOMALY_05000273 && cclk == sclk) 808 panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");
801 panic("ANOMALY 05000273, SCLK can not be same as CCLK");
802#endif
803 809
804#ifdef BF561_FAMILY 810#ifdef BF561_FAMILY
805 if (ANOMALY_05000266) { 811 if (ANOMALY_05000266) {
@@ -881,7 +887,7 @@ void __init setup_arch(char **cmdline_p)
881 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", 887 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
882 bfin_compiled_revid(), bfin_revid()); 888 bfin_compiled_revid(), bfin_revid());
883 if (bfin_compiled_revid() > bfin_revid()) 889 if (bfin_compiled_revid() > bfin_revid())
884 panic("Error: you are missing anomaly workarounds for this rev\n"); 890 panic("Error: you are missing anomaly workarounds for this rev");
885 } 891 }
886 } 892 }
887 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) 893 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
@@ -891,16 +897,13 @@ void __init setup_arch(char **cmdline_p)
891 897
892 /* We can't run on BF548-0.1 due to ANOMALY 05000448 */ 898 /* We can't run on BF548-0.1 due to ANOMALY 05000448 */
893 if (bfin_cpuid() == 0x27de && bfin_revid() == 1) 899 if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
894 panic("You can't run on this processor due to 05000448\n"); 900 panic("You can't run on this processor due to 05000448");
895 901
896 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); 902 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
897 903
898 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", 904 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
899 cclk / 1000000, sclk / 1000000); 905 cclk / 1000000, sclk / 1000000);
900 906
901 if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
902 printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
903
904 setup_bootmem_allocator(); 907 setup_bootmem_allocator();
905 908
906 paging_init(); 909 paging_init();
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index fce49d7cf001..a8f1329c15a4 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -78,11 +78,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
78 return do_mmap2(addr, len, prot, flags, fd, pgoff); 78 return do_mmap2(addr, len, prot, flags, fd, pgoff);
79} 79}
80 80
81asmlinkage int sys_getpagesize(void)
82{
83 return PAGE_SIZE;
84}
85
86asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) 81asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
87{ 82{
88 return sram_alloc_with_lsl(size, flags); 83 return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 27646121280a..0791eba40d9f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -20,8 +20,9 @@
20 20
21#include <asm/blackfin.h> 21#include <asm/blackfin.h>
22#include <asm/time.h> 22#include <asm/time.h>
23#include <asm/gptimers.h>
23 24
24#ifdef CONFIG_CYCLES_CLOCKSOURCE 25#if defined(CONFIG_CYCLES_CLOCKSOURCE)
25 26
26/* Accelerators for sched_clock() 27/* Accelerators for sched_clock()
27 * convert from cycles(64bits) => nanoseconds (64bits) 28 * convert from cycles(64bits) => nanoseconds (64bits)
@@ -58,15 +59,15 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc)
58 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; 59 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
59} 60}
60 61
61static cycle_t read_cycles(struct clocksource *cs) 62static cycle_t bfin_read_cycles(struct clocksource *cs)
62{ 63{
63 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); 64 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
64} 65}
65 66
66static struct clocksource clocksource_bfin = { 67static struct clocksource bfin_cs_cycles = {
67 .name = "bfin_cycles", 68 .name = "bfin_cs_cycles",
68 .rating = 350, 69 .rating = 350,
69 .read = read_cycles, 70 .read = bfin_read_cycles,
70 .mask = CLOCKSOURCE_MASK(64), 71 .mask = CLOCKSOURCE_MASK(64),
71 .shift = 22, 72 .shift = 22,
72 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 73 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -74,53 +75,198 @@ static struct clocksource clocksource_bfin = {
74 75
75unsigned long long sched_clock(void) 76unsigned long long sched_clock(void)
76{ 77{
77 return cycles_2_ns(read_cycles(&clocksource_bfin)); 78 return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles));
78} 79}
79 80
80static int __init bfin_clocksource_init(void) 81static int __init bfin_cs_cycles_init(void)
81{ 82{
82 set_cyc2ns_scale(get_cclk() / 1000); 83 set_cyc2ns_scale(get_cclk() / 1000);
83 84
84 clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift); 85 bfin_cs_cycles.mult = \
86 clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
85 87
86 if (clocksource_register(&clocksource_bfin)) 88 if (clocksource_register(&bfin_cs_cycles))
87 panic("failed to register clocksource"); 89 panic("failed to register clocksource");
88 90
89 return 0; 91 return 0;
90} 92}
93#else
94# define bfin_cs_cycles_init()
95#endif
96
97#ifdef CONFIG_GPTMR0_CLOCKSOURCE
98
99void __init setup_gptimer0(void)
100{
101 disable_gptimers(TIMER0bit);
102
103 set_gptimer_config(TIMER0_id, \
104 TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
105 set_gptimer_period(TIMER0_id, -1);
106 set_gptimer_pwidth(TIMER0_id, -2);
107 SSYNC();
108 enable_gptimers(TIMER0bit);
109}
110
111static cycle_t bfin_read_gptimer0(void)
112{
113 return bfin_read_TIMER0_COUNTER();
114}
115
116static struct clocksource bfin_cs_gptimer0 = {
117 .name = "bfin_cs_gptimer0",
118 .rating = 400,
119 .read = bfin_read_gptimer0,
120 .mask = CLOCKSOURCE_MASK(32),
121 .shift = 22,
122 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
123};
124
125static int __init bfin_cs_gptimer0_init(void)
126{
127 setup_gptimer0();
91 128
129 bfin_cs_gptimer0.mult = \
130 clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);
131
132 if (clocksource_register(&bfin_cs_gptimer0))
133 panic("failed to register clocksource");
134
135 return 0;
136}
92#else 137#else
93# define bfin_clocksource_init() 138# define bfin_cs_gptimer0_init()
94#endif 139#endif
95 140
141#ifdef CONFIG_CORE_TIMER_IRQ_L1
142__attribute__((l1_text))
143#endif
144irqreturn_t timer_interrupt(int irq, void *dev_id);
145
146static int bfin_timer_set_next_event(unsigned long, \
147 struct clock_event_device *);
148
149static void bfin_timer_set_mode(enum clock_event_mode, \
150 struct clock_event_device *);
151
152static struct clock_event_device clockevent_bfin = {
153#if defined(CONFIG_TICKSOURCE_GPTMR0)
154 .name = "bfin_gptimer0",
155 .rating = 300,
156 .irq = IRQ_TIMER0,
157#else
158 .name = "bfin_core_timer",
159 .rating = 350,
160 .irq = IRQ_CORETMR,
161#endif
162 .shift = 32,
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .set_next_event = bfin_timer_set_next_event,
165 .set_mode = bfin_timer_set_mode,
166};
167
168static struct irqaction bfin_timer_irq = {
169#if defined(CONFIG_TICKSOURCE_GPTMR0)
170 .name = "Blackfin GPTimer0",
171#else
172 .name = "Blackfin CoreTimer",
173#endif
174 .flags = IRQF_DISABLED | IRQF_TIMER | \
175 IRQF_IRQPOLL | IRQF_PERCPU,
176 .handler = timer_interrupt,
177 .dev_id = &clockevent_bfin,
178};
179
180#if defined(CONFIG_TICKSOURCE_GPTMR0)
96static int bfin_timer_set_next_event(unsigned long cycles, 181static int bfin_timer_set_next_event(unsigned long cycles,
97 struct clock_event_device *evt) 182 struct clock_event_device *evt)
98{ 183{
184 disable_gptimers(TIMER0bit);
185
186 /* it starts counting three SCLK cycles after the TIMENx bit is set */
187 set_gptimer_pwidth(TIMER0_id, cycles - 3);
188 enable_gptimers(TIMER0bit);
189 return 0;
190}
191
192static void bfin_timer_set_mode(enum clock_event_mode mode,
193 struct clock_event_device *evt)
194{
195 switch (mode) {
196 case CLOCK_EVT_MODE_PERIODIC: {
197 set_gptimer_config(TIMER0_id, \
198 TIMER_OUT_DIS | TIMER_IRQ_ENA | \
199 TIMER_PERIOD_CNT | TIMER_MODE_PWM);
200 set_gptimer_period(TIMER0_id, get_sclk() / HZ);
201 set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
202 enable_gptimers(TIMER0bit);
203 break;
204 }
205 case CLOCK_EVT_MODE_ONESHOT:
206 disable_gptimers(TIMER0bit);
207 set_gptimer_config(TIMER0_id, \
208 TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
209 set_gptimer_period(TIMER0_id, 0);
210 break;
211 case CLOCK_EVT_MODE_UNUSED:
212 case CLOCK_EVT_MODE_SHUTDOWN:
213 disable_gptimers(TIMER0bit);
214 break;
215 case CLOCK_EVT_MODE_RESUME:
216 break;
217 }
218}
219
220static void bfin_timer_ack(void)
221{
222 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
223}
224
225static void __init bfin_timer_init(void)
226{
227 disable_gptimers(TIMER0bit);
228}
229
230static unsigned long __init bfin_clockevent_check(void)
231{
232 setup_irq(IRQ_TIMER0, &bfin_timer_irq);
233 return get_sclk();
234}
235
236#else /* CONFIG_TICKSOURCE_CORETMR */
237
238static int bfin_timer_set_next_event(unsigned long cycles,
239 struct clock_event_device *evt)
240{
241 bfin_write_TCNTL(TMPWR);
242 CSYNC();
99 bfin_write_TCOUNT(cycles); 243 bfin_write_TCOUNT(cycles);
100 CSYNC(); 244 CSYNC();
245 bfin_write_TCNTL(TMPWR | TMREN);
101 return 0; 246 return 0;
102} 247}
103 248
104static void bfin_timer_set_mode(enum clock_event_mode mode, 249static void bfin_timer_set_mode(enum clock_event_mode mode,
105 struct clock_event_device *evt) 250 struct clock_event_device *evt)
106{ 251{
107 switch (mode) { 252 switch (mode) {
108 case CLOCK_EVT_MODE_PERIODIC: { 253 case CLOCK_EVT_MODE_PERIODIC: {
109 unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1); 254 unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
110 bfin_write_TCNTL(TMPWR); 255 bfin_write_TCNTL(TMPWR);
111 bfin_write_TSCALE(TIME_SCALE - 1);
112 CSYNC(); 256 CSYNC();
257 bfin_write_TSCALE(TIME_SCALE - 1);
113 bfin_write_TPERIOD(tcount); 258 bfin_write_TPERIOD(tcount);
114 bfin_write_TCOUNT(tcount); 259 bfin_write_TCOUNT(tcount);
115 bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
116 CSYNC(); 260 CSYNC();
261 bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
117 break; 262 break;
118 } 263 }
119 case CLOCK_EVT_MODE_ONESHOT: 264 case CLOCK_EVT_MODE_ONESHOT:
265 bfin_write_TCNTL(TMPWR);
266 CSYNC();
120 bfin_write_TSCALE(TIME_SCALE - 1); 267 bfin_write_TSCALE(TIME_SCALE - 1);
268 bfin_write_TPERIOD(0);
121 bfin_write_TCOUNT(0); 269 bfin_write_TCOUNT(0);
122 bfin_write_TCNTL(TMPWR | TMREN);
123 CSYNC();
124 break; 270 break;
125 case CLOCK_EVT_MODE_UNUSED: 271 case CLOCK_EVT_MODE_UNUSED:
126 case CLOCK_EVT_MODE_SHUTDOWN: 272 case CLOCK_EVT_MODE_SHUTDOWN:
@@ -132,6 +278,10 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
132 } 278 }
133} 279}
134 280
281static void bfin_timer_ack(void)
282{
283}
284
135static void __init bfin_timer_init(void) 285static void __init bfin_timer_init(void)
136{ 286{
137 /* power up the timer, but don't enable it just yet */ 287 /* power up the timer, but don't enable it just yet */
@@ -145,38 +295,32 @@ static void __init bfin_timer_init(void)
145 bfin_write_TPERIOD(0); 295 bfin_write_TPERIOD(0);
146 bfin_write_TCOUNT(0); 296 bfin_write_TCOUNT(0);
147 297
148 /* now enable the timer */
149 CSYNC(); 298 CSYNC();
150} 299}
151 300
301static unsigned long __init bfin_clockevent_check(void)
302{
303 setup_irq(IRQ_CORETMR, &bfin_timer_irq);
304 return get_cclk() / TIME_SCALE;
305}
306
307void __init setup_core_timer(void)
308{
309 bfin_timer_init();
310 bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
311}
312#endif /* CONFIG_TICKSOURCE_GPTMR0 */
313
152/* 314/*
153 * timer_interrupt() needs to keep up the real-time clock, 315 * timer_interrupt() needs to keep up the real-time clock,
154 * as well as call the "do_timer()" routine every clocktick 316 * as well as call the "do_timer()" routine every clocktick
155 */ 317 */
156#ifdef CONFIG_CORE_TIMER_IRQ_L1
157__attribute__((l1_text))
158#endif
159irqreturn_t timer_interrupt(int irq, void *dev_id);
160
161static struct clock_event_device clockevent_bfin = {
162 .name = "bfin_core_timer",
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .shift = 32,
165 .set_next_event = bfin_timer_set_next_event,
166 .set_mode = bfin_timer_set_mode,
167};
168
169static struct irqaction bfin_timer_irq = {
170 .name = "Blackfin Core Timer",
171 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
172 .handler = timer_interrupt,
173 .dev_id = &clockevent_bfin,
174};
175
176irqreturn_t timer_interrupt(int irq, void *dev_id) 318irqreturn_t timer_interrupt(int irq, void *dev_id)
177{ 319{
178 struct clock_event_device *evt = dev_id; 320 struct clock_event_device *evt = dev_id;
321 smp_mb();
179 evt->event_handler(evt); 322 evt->event_handler(evt);
323 bfin_timer_ack();
180 return IRQ_HANDLED; 324 return IRQ_HANDLED;
181} 325}
182 326
@@ -184,9 +328,8 @@ static int __init bfin_clockevent_init(void)
184{ 328{
185 unsigned long timer_clk; 329 unsigned long timer_clk;
186 330
187 timer_clk = get_cclk() / TIME_SCALE; 331 timer_clk = bfin_clockevent_check();
188 332
189 setup_irq(IRQ_CORETMR, &bfin_timer_irq);
190 bfin_timer_init(); 333 bfin_timer_init();
191 334
192 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 335 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
@@ -218,6 +361,7 @@ void __init time_init(void)
218 xtime.tv_nsec = 0; 361 xtime.tv_nsec = 0;
219 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); 362 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
220 363
221 bfin_clocksource_init(); 364 bfin_cs_cycles_init();
365 bfin_cs_gptimer0_init();
222 bfin_clockevent_init(); 366 bfin_clockevent_init();
223} 367}
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 1bbacfbd4c5d..adb54aa7d7c8 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -24,14 +24,10 @@
24 24
25static struct irqaction bfin_timer_irq = { 25static struct irqaction bfin_timer_irq = {
26 .name = "Blackfin Timer Tick", 26 .name = "Blackfin Timer Tick",
27#ifdef CONFIG_IRQ_PER_CPU
28 .flags = IRQF_DISABLED | IRQF_PERCPU,
29#else
30 .flags = IRQF_DISABLED 27 .flags = IRQF_DISABLED
31#endif
32}; 28};
33 29
34#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 30#if defined(CONFIG_IPIPE)
35void __init setup_system_timer0(void) 31void __init setup_system_timer0(void)
36{ 32{
37 /* Power down the core timer, just to play safe. */ 33 /* Power down the core timer, just to play safe. */
@@ -74,7 +70,7 @@ void __init setup_core_timer(void)
74static void __init 70static void __init
75time_sched_init(irqreturn_t(*timer_routine) (int, void *)) 71time_sched_init(irqreturn_t(*timer_routine) (int, void *))
76{ 72{
77#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 73#if defined(CONFIG_IPIPE)
78 setup_system_timer0(); 74 setup_system_timer0();
79 bfin_timer_irq.handler = timer_routine; 75 bfin_timer_irq.handler = timer_routine;
80 setup_irq(IRQ_TIMER0, &bfin_timer_irq); 76 setup_irq(IRQ_TIMER0, &bfin_timer_irq);
@@ -94,7 +90,7 @@ static unsigned long gettimeoffset(void)
94 unsigned long offset; 90 unsigned long offset;
95 unsigned long clocks_per_jiffy; 91 unsigned long clocks_per_jiffy;
96 92
97#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 93#if defined(CONFIG_IPIPE)
98 clocks_per_jiffy = bfin_read_TIMER0_PERIOD(); 94 clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
99 offset = bfin_read_TIMER0_COUNTER() / \ 95 offset = bfin_read_TIMER0_COUNTER() / \
100 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC); 96 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
@@ -133,36 +129,25 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
133 static long last_rtc_update; 129 static long last_rtc_update;
134 130
135 write_seqlock(&xtime_lock); 131 write_seqlock(&xtime_lock);
136#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE) 132 do_timer(1);
133
137 /* 134 /*
138 * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is 135 * If we have an externally synchronized Linux clock, then update
139 * enabled. 136 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
137 * called as close as possible to 500 ms before the new second starts.
140 */ 138 */
141 if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) { 139 if (ntp_synced() &&
142#endif 140 xtime.tv_sec > last_rtc_update + 660 &&
143 do_timer(1); 141 (xtime.tv_nsec / NSEC_PER_USEC) >=
144 142 500000 - ((unsigned)TICK_SIZE) / 2
145 /* 143 && (xtime.tv_nsec / NSEC_PER_USEC) <=
146 * If we have an externally synchronized Linux clock, then update 144 500000 + ((unsigned)TICK_SIZE) / 2) {
147 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 145 if (set_rtc_mmss(xtime.tv_sec) == 0)
148 * called as close as possible to 500 ms before the new second starts. 146 last_rtc_update = xtime.tv_sec;
149 */ 147 else
150 if (ntp_synced() && 148 /* Do it again in 60s. */
151 xtime.tv_sec > last_rtc_update + 660 && 149 last_rtc_update = xtime.tv_sec - 600;
152 (xtime.tv_nsec / NSEC_PER_USEC) >=
153 500000 - ((unsigned)TICK_SIZE) / 2
154 && (xtime.tv_nsec / NSEC_PER_USEC) <=
155 500000 + ((unsigned)TICK_SIZE) / 2) {
156 if (set_rtc_mmss(xtime.tv_sec) == 0)
157 last_rtc_update = xtime.tv_sec;
158 else
159 /* Do it again in 60s. */
160 last_rtc_update = xtime.tv_sec - 600;
161 }
162#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
163 set_gptimer_status(0, TIMER_STATUS_TIMIL0);
164 } 150 }
165#endif
166 write_sequnlock(&xtime_lock); 151 write_sequnlock(&xtime_lock);
167 152
168#ifdef CONFIG_IPIPE 153#ifdef CONFIG_IPIPE
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ffe7fb53eccb..aa76dfb0226e 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -68,6 +68,13 @@
68 ({ if (0) printk(fmt, ##arg); 0; }) 68 ({ if (0) printk(fmt, ##arg); 0; })
69#endif 69#endif
70 70
71#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
72u32 last_seqstat;
73#ifdef CONFIG_DEBUG_MMRS_MODULE
74EXPORT_SYMBOL(last_seqstat);
75#endif
76#endif
77
71/* Initiate the event table handler */ 78/* Initiate the event table handler */
72void __init trap_init(void) 79void __init trap_init(void)
73{ 80{
@@ -79,7 +86,6 @@ void __init trap_init(void)
79static void decode_address(char *buf, unsigned long address) 86static void decode_address(char *buf, unsigned long address)
80{ 87{
81#ifdef CONFIG_DEBUG_VERBOSE 88#ifdef CONFIG_DEBUG_VERBOSE
82 struct vm_list_struct *vml;
83 struct task_struct *p; 89 struct task_struct *p;
84 struct mm_struct *mm; 90 struct mm_struct *mm;
85 unsigned long flags, offset; 91 unsigned long flags, offset;
@@ -196,6 +202,11 @@ done:
196 202
197asmlinkage void double_fault_c(struct pt_regs *fp) 203asmlinkage void double_fault_c(struct pt_regs *fp)
198{ 204{
205#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
206 int j;
207 trace_buffer_save(j);
208#endif
209
199 console_verbose(); 210 console_verbose();
200 oops_in_progress = 1; 211 oops_in_progress = 1;
201#ifdef CONFIG_DEBUG_VERBOSE 212#ifdef CONFIG_DEBUG_VERBOSE
@@ -220,9 +231,10 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
220 dump_bfin_process(fp); 231 dump_bfin_process(fp);
221 dump_bfin_mem(fp); 232 dump_bfin_mem(fp);
222 show_regs(fp); 233 show_regs(fp);
234 dump_bfin_trace_buffer();
223 } 235 }
224#endif 236#endif
225 panic("Double Fault - unrecoverable event\n"); 237 panic("Double Fault - unrecoverable event");
226 238
227} 239}
228 240
@@ -239,6 +251,9 @@ asmlinkage void trap_c(struct pt_regs *fp)
239 unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; 251 unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
240 252
241 trace_buffer_save(j); 253 trace_buffer_save(j);
254#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
255 last_seqstat = (u32)fp->seqstat;
256#endif
242 257
243 /* Important - be very careful dereferncing pointers - will lead to 258 /* Important - be very careful dereferncing pointers - will lead to
244 * double faults if the stack has become corrupt 259 * double faults if the stack has become corrupt
@@ -588,6 +603,9 @@ asmlinkage void trap_c(struct pt_regs *fp)
588 force_sig_info(sig, &info, current); 603 force_sig_info(sig, &info, current);
589 } 604 }
590 605
606 if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
607 fp->pc = SAFE_USER_INSTRUCTION;
608
591 trace_buffer_restore(j); 609 trace_buffer_restore(j);
592 return; 610 return;
593} 611}
@@ -832,6 +850,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
832 decode_address(buf, (unsigned int)stack); 850 decode_address(buf, (unsigned int)stack);
833 printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); 851 printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
834 852
853 if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
854 printk(KERN_NOTICE "Invalid stack pointer\n");
855 return;
856 }
857
835 /* First thing is to look for a frame pointer */ 858 /* First thing is to look for a frame pointer */
836 for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { 859 for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
837 if (*addr & 0x1) 860 if (*addr & 0x1)
@@ -1066,6 +1089,29 @@ void show_regs(struct pt_regs *fp)
1066 unsigned int cpu = smp_processor_id(); 1089 unsigned int cpu = smp_processor_id();
1067 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); 1090 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
1068 1091
1092 verbose_printk(KERN_NOTICE "\n");
1093 if (CPUID != bfin_cpuid())
1094 verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
1095 "but running on:0x%04x (Rev %d)\n",
1096 CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
1097
1098 verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
1099 CPU, bfin_compiled_revid());
1100
1101 if (bfin_compiled_revid() != bfin_revid())
1102 verbose_printk("(Detected 0.%d)", bfin_revid());
1103
1104 verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
1105 get_cclk()/1000000, get_sclk()/1000000,
1106#ifdef CONFIG_MPU
1107 "mpu on"
1108#else
1109 "mpu off"
1110#endif
1111 );
1112
1113 verbose_printk(KERN_NOTICE "%s", linux_banner);
1114
1069 verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted()); 1115 verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
1070 verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n", 1116 verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
1071 (long)fp->seqstat, fp->ipend, fp->syscfg); 1117 (long)fp->seqstat, fp->ipend, fp->syscfg);
@@ -1246,5 +1292,5 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
1246 dump_bfin_mem(fp); 1292 dump_bfin_mem(fp);
1247 show_regs(fp); 1293 show_regs(fp);
1248 dump_stack(); 1294 dump_stack();
1249 panic("Unrecoverable event\n"); 1295 panic("Unrecoverable event");
1250} 1296}
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 27952ae047d8..8b67167cb4f4 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -50,7 +50,9 @@ SECTIONS
50 _text = .; 50 _text = .;
51 __stext = .; 51 __stext = .;
52 TEXT_TEXT 52 TEXT_TEXT
53#ifndef CONFIG_SCHEDULE_L1
53 SCHED_TEXT 54 SCHED_TEXT
55#endif
54 LOCK_TEXT 56 LOCK_TEXT
55 KPROBES_TEXT 57 KPROBES_TEXT
56 *(.text.*) 58 *(.text.*)
@@ -180,6 +182,9 @@ SECTIONS
180 . = ALIGN(4); 182 . = ALIGN(4);
181 __stext_l1 = .; 183 __stext_l1 = .;
182 *(.l1.text) 184 *(.l1.text)
185#ifdef CONFIG_SCHEDULE_L1
186 SCHED_TEXT
187#endif
183 . = ALIGN(4); 188 . = ALIGN(4);
184 __etext_l1 = .; 189 __etext_l1 = .;
185 } 190 }
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
index f397ede006bf..4c76fefb7a3b 100644
--- a/arch/blackfin/mach-bf518/Kconfig
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -156,6 +156,7 @@ config IRQ_PORTH_INTB
156 default 11 156 default 11
157config IRQ_TIMER0 157config IRQ_TIMER0
158 int "IRQ_TIMER0" 158 int "IRQ_TIMER0"
159 default 7 if TICKSOURCE_GPTMR0
159 default 8 160 default 8
160config IRQ_TIMER1 161config IRQ_TIMER1
161 int "IRQ_TIMER1" 162 int "IRQ_TIMER1"
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 41f2eacfef20..62bba09bcce6 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -82,7 +82,11 @@ static struct physmap_flash_data ezbrd_flash_data = {
82 82
83static struct resource ezbrd_flash_resource = { 83static struct resource ezbrd_flash_resource = {
84 .start = 0x20000000, 84 .start = 0x20000000,
85#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
86 .end = 0x202fffff,
87#else
85 .end = 0x203fffff, 88 .end = 0x203fffff,
89#endif
86 .flags = IORESOURCE_MEM, 90 .flags = IORESOURCE_MEM,
87}; 91};
88 92
@@ -162,8 +166,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
162}; 166};
163#endif 167#endif
164 168
165#if defined(CONFIG_SPI_ADC_BF533) \ 169#if defined(CONFIG_BFIN_SPI_ADC) \
166 || defined(CONFIG_SPI_ADC_BF533_MODULE) 170 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
167/* SPI ADC chip */ 171/* SPI ADC chip */
168static struct bfin5xx_spi_chip spi_adc_chip_info = { 172static struct bfin5xx_spi_chip spi_adc_chip_info = {
169 .enable_dma = 1, /* use dma transfer with this chip*/ 173 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -249,8 +253,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
249 }, 253 },
250#endif 254#endif
251 255
252#if defined(CONFIG_SPI_ADC_BF533) \ 256#if defined(CONFIG_BFIN_SPI_ADC) \
253 || defined(CONFIG_SPI_ADC_BF533_MODULE) 257 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
254 { 258 {
255 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 259 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
256 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 260 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -514,7 +518,7 @@ static struct platform_device i2c_bfin_twi_device = {
514#endif 518#endif
515 519
516static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 520static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
517#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 521#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
518 { 522 {
519 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 523 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
520 }, 524 },
@@ -678,6 +682,11 @@ static int __init ezbrd_init(void)
678 ARRAY_SIZE(bfin_i2c_board_info)); 682 ARRAY_SIZE(bfin_i2c_board_info));
679 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); 683 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
680 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 684 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
685 /* setup BF518-EZBRD GPIO pin PG11 to AMS2, PG15 to AMS3. */
686 peripheral_request(P_AMS2, "ParaFlash");
687#if !defined(CONFIG_SPI_BFIN) && !defined(CONFIG_SPI_BFIN_MODULE)
688 peripheral_request(P_AMS3, "ParaFlash");
689#endif
681 return 0; 690 return 0;
682} 691}
683 692
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index c847bb101076..b69bd9af38dd 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -6,14 +6,19 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List 10 * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
11 */ 11 */
12 12
13/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
14#if __SILICON_REVISION__ < 0
15# error will not work on BF518 silicon version
16#endif
17
13#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
14#define _MACH_ANOMALY_H_ 19#define _MACH_ANOMALY_H_
15 20
16/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */ 21/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
17#define ANOMALY_05000074 (1) 22#define ANOMALY_05000074 (1)
18/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 23/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
19#define ANOMALY_05000122 (1) 24#define ANOMALY_05000122 (1)
@@ -47,7 +52,7 @@
47#define ANOMALY_05000435 (1) 52#define ANOMALY_05000435 (1)
48/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */ 53/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */
49#define ANOMALY_05000438 (1) 54#define ANOMALY_05000438 (1)
50/* Preboot Cannot be Used to Program the PLL_DIV Register */ 55/* Preboot Cannot be Used to Alter the PLL_DIV Register */
51#define ANOMALY_05000439 (1) 56#define ANOMALY_05000439 (1)
52/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */ 57/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
53#define ANOMALY_05000440 (1) 58#define ANOMALY_05000440 (1)
@@ -61,32 +66,56 @@
61#define ANOMALY_05000453 (1) 66#define ANOMALY_05000453 (1)
62/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */ 67/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
63#define ANOMALY_05000455 (1) 68#define ANOMALY_05000455 (1)
69/* False Hardware Error when RETI points to invalid memory */
70#define ANOMALY_05000461 (1)
64 71
65/* Anomalies that don't exist on this proc */ 72/* Anomalies that don't exist on this proc */
73#define ANOMALY_05000099 (0)
74#define ANOMALY_05000119 (0)
75#define ANOMALY_05000120 (0)
66#define ANOMALY_05000125 (0) 76#define ANOMALY_05000125 (0)
77#define ANOMALY_05000149 (0)
67#define ANOMALY_05000158 (0) 78#define ANOMALY_05000158 (0)
79#define ANOMALY_05000171 (0)
80#define ANOMALY_05000179 (0)
68#define ANOMALY_05000183 (0) 81#define ANOMALY_05000183 (0)
69#define ANOMALY_05000198 (0) 82#define ANOMALY_05000198 (0)
83#define ANOMALY_05000215 (0)
84#define ANOMALY_05000220 (0)
85#define ANOMALY_05000227 (0)
70#define ANOMALY_05000230 (0) 86#define ANOMALY_05000230 (0)
87#define ANOMALY_05000231 (0)
88#define ANOMALY_05000233 (0)
89#define ANOMALY_05000242 (0)
71#define ANOMALY_05000244 (0) 90#define ANOMALY_05000244 (0)
91#define ANOMALY_05000248 (0)
92#define ANOMALY_05000250 (0)
72#define ANOMALY_05000261 (0) 93#define ANOMALY_05000261 (0)
73#define ANOMALY_05000263 (0) 94#define ANOMALY_05000263 (0)
74#define ANOMALY_05000266 (0) 95#define ANOMALY_05000266 (0)
75#define ANOMALY_05000273 (0) 96#define ANOMALY_05000273 (0)
97#define ANOMALY_05000274 (0)
76#define ANOMALY_05000278 (0) 98#define ANOMALY_05000278 (0)
77#define ANOMALY_05000285 (0) 99#define ANOMALY_05000285 (0)
100#define ANOMALY_05000287 (0)
101#define ANOMALY_05000301 (0)
78#define ANOMALY_05000305 (0) 102#define ANOMALY_05000305 (0)
79#define ANOMALY_05000307 (0) 103#define ANOMALY_05000307 (0)
80#define ANOMALY_05000311 (0) 104#define ANOMALY_05000311 (0)
81#define ANOMALY_05000312 (0) 105#define ANOMALY_05000312 (0)
82#define ANOMALY_05000323 (0) 106#define ANOMALY_05000323 (0)
83#define ANOMALY_05000353 (0) 107#define ANOMALY_05000353 (0)
108#define ANOMALY_05000362 (1)
84#define ANOMALY_05000363 (0) 109#define ANOMALY_05000363 (0)
85#define ANOMALY_05000380 (0) 110#define ANOMALY_05000380 (0)
86#define ANOMALY_05000386 (0) 111#define ANOMALY_05000386 (0)
112#define ANOMALY_05000389 (0)
113#define ANOMALY_05000400 (0)
87#define ANOMALY_05000412 (0) 114#define ANOMALY_05000412 (0)
88#define ANOMALY_05000432 (0) 115#define ANOMALY_05000432 (0)
89#define ANOMALY_05000447 (0) 116#define ANOMALY_05000447 (0)
90#define ANOMALY_05000448 (0) 117#define ANOMALY_05000448 (0)
118#define ANOMALY_05000456 (0)
119#define ANOMALY_05000450 (0)
91 120
92#endif 121#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/portmux.h b/arch/blackfin/mach-bf518/include/mach/portmux.h
index f618b487b2b0..a0fc77fd3315 100644
--- a/arch/blackfin/mach-bf518/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf518/include/mach/portmux.h
@@ -185,6 +185,10 @@
185#define P_PTP_PPS (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2)) 185#define P_PTP_PPS (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2))
186#define P_PTP_CLKOUT (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2)) 186#define P_PTP_CLKOUT (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2))
187 187
188#define P_HWAIT (P_DEFINED | P_IDENT(GPIO_PG000000000) | P_FUNCT(1)) 188/* AMS */
189#define P_AMS2 (P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(1))
190#define P_AMS3 (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(2))
191
192#define P_HWAIT (P_DEFINED | P_IDENT(GPIO_PG000000000) | P_FUNCT(1))
189 193
190#endif /* _MACH_PORTMUX_H_ */ 194#endif /* _MACH_PORTMUX_H_ */
diff --git a/arch/blackfin/mach-bf527/Kconfig b/arch/blackfin/mach-bf527/Kconfig
index 8438ec6d6679..848ac6f86823 100644
--- a/arch/blackfin/mach-bf527/Kconfig
+++ b/arch/blackfin/mach-bf527/Kconfig
@@ -170,6 +170,7 @@ config IRQ_PORTH_INTB
170 default 11 170 default 11
171config IRQ_TIMER0 171config IRQ_TIMER0
172 int "IRQ_TIMER0" 172 int "IRQ_TIMER0"
173 default 7 if TICKSOURCE_GPTMR0
173 default 8 174 default 8
174config IRQ_TIMER1 175config IRQ_TIMER1
175 int "IRQ_TIMER1" 176 int "IRQ_TIMER1"
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 48e69eecdba4..6d6f9effa0bb 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -463,8 +463,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
463}; 463};
464#endif 464#endif
465 465
466#if defined(CONFIG_SPI_ADC_BF533) \ 466#if defined(CONFIG_BFIN_SPI_ADC) \
467 || defined(CONFIG_SPI_ADC_BF533_MODULE) 467 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
468/* SPI ADC chip */ 468/* SPI ADC chip */
469static struct bfin5xx_spi_chip spi_adc_chip_info = { 469static struct bfin5xx_spi_chip spi_adc_chip_info = {
470 .enable_dma = 1, /* use dma transfer with this chip*/ 470 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -554,8 +554,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
554 }, 554 },
555#endif 555#endif
556 556
557#if defined(CONFIG_SPI_ADC_BF533) \ 557#if defined(CONFIG_BFIN_SPI_ADC) \
558 || defined(CONFIG_SPI_ADC_BF533_MODULE) 558 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
559 { 559 {
560 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 560 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
561 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 561 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -789,7 +789,7 @@ static struct platform_device i2c_bfin_twi_device = {
789#endif 789#endif
790 790
791static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 791static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
792#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 792#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
793 { 793 {
794 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 794 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
795 .type = "pcf8574_lcd", 795 .type = "pcf8574_lcd",
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 7fe480e4ebe8..1435c5d38cd5 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -247,8 +247,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
247}; 247};
248#endif 248#endif
249 249
250#if defined(CONFIG_SPI_ADC_BF533) \ 250#if defined(CONFIG_BFIN_SPI_ADC) \
251 || defined(CONFIG_SPI_ADC_BF533_MODULE) 251 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
252/* SPI ADC chip */ 252/* SPI ADC chip */
253static struct bfin5xx_spi_chip spi_adc_chip_info = { 253static struct bfin5xx_spi_chip spi_adc_chip_info = {
254 .enable_dma = 1, /* use dma transfer with this chip*/ 254 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -354,8 +354,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
354 }, 354 },
355#endif 355#endif
356 356
357#if defined(CONFIG_SPI_ADC_BF533) \ 357#if defined(CONFIG_BFIN_SPI_ADC) \
358 || defined(CONFIG_SPI_ADC_BF533_MODULE) 358 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
359 { 359 {
360 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 360 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
361 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 361 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -586,7 +586,7 @@ static struct platform_device i2c_bfin_twi_device = {
586#endif 586#endif
587 587
588static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 588static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
589#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 589#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
590 { 590 {
591 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 591 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
592 }, 592 },
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index d0864111ef59..147edd1eb1ad 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -485,8 +485,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
485}; 485};
486#endif 486#endif
487 487
488#if defined(CONFIG_SPI_ADC_BF533) \ 488#if defined(CONFIG_BFIN_SPI_ADC) \
489 || defined(CONFIG_SPI_ADC_BF533_MODULE) 489 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
490/* SPI ADC chip */ 490/* SPI ADC chip */
491static struct bfin5xx_spi_chip spi_adc_chip_info = { 491static struct bfin5xx_spi_chip spi_adc_chip_info = {
492 .enable_dma = 1, /* use dma transfer with this chip*/ 492 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -509,6 +509,13 @@ static struct bfin5xx_spi_chip ad9960_spi_chip_info = {
509}; 509};
510#endif 510#endif
511 511
512#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
513static struct bfin5xx_spi_chip mmc_spi_chip_info = {
514 .enable_dma = 0,
515 .bits_per_word = 8,
516};
517#endif
518
512#if defined(CONFIG_PBX) 519#if defined(CONFIG_PBX)
513static struct bfin5xx_spi_chip spi_si3xxx_chip_info = { 520static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
514 .ctl_reg = 0x4, /* send zero */ 521 .ctl_reg = 0x4, /* send zero */
@@ -593,8 +600,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
593 }, 600 },
594#endif 601#endif
595 602
596#if defined(CONFIG_SPI_ADC_BF533) \ 603#if defined(CONFIG_BFIN_SPI_ADC) \
597 || defined(CONFIG_SPI_ADC_BF533_MODULE) 604 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
598 { 605 {
599 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 606 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
600 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 607 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -624,6 +631,17 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
624 .controller_data = &ad9960_spi_chip_info, 631 .controller_data = &ad9960_spi_chip_info,
625 }, 632 },
626#endif 633#endif
634#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
635 {
636 .modalias = "mmc_spi",
637 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
638 .bus_num = 0,
639 .chip_select = 3,
640 .controller_data = &mmc_spi_chip_info,
641 .mode = SPI_MODE_0,
642 },
643#endif
644
627#if defined(CONFIG_PBX) 645#if defined(CONFIG_PBX)
628 { 646 {
629 .modalias = "fxs-spi", 647 .modalias = "fxs-spi",
@@ -836,7 +854,7 @@ static struct platform_device i2c_bfin_twi_device = {
836#endif 854#endif
837 855
838static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 856static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
839#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 857#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
840 { 858 {
841 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 859 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
842 }, 860 },
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index df6808d8a6ef..c84ddea95749 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -6,14 +6,19 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision B, 08/12/2008; ADSP-BF526 Blackfin Processor Anomaly List 10 * - Revision C, 03/13/2009; ADSP-BF526 Blackfin Processor Anomaly List
11 * - Revision E, 08/18/2008; ADSP-BF527 Blackfin Processor Anomaly List 11 * - Revision F, 03/03/2009; ADSP-BF527 Blackfin Processor Anomaly List
12 */ 12 */
13 13
14#ifndef _MACH_ANOMALY_H_ 14#ifndef _MACH_ANOMALY_H_
15#define _MACH_ANOMALY_H_ 15#define _MACH_ANOMALY_H_
16 16
17/* We do not support old silicon - sorry */
18#if __SILICON_REVISION__ < 0
19# error will not work on BF526/BF527 silicon version
20#endif
21
17#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__) 22#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
18# define ANOMALY_BF526 1 23# define ANOMALY_BF526 1
19#else 24#else
@@ -25,158 +30,203 @@
25# define ANOMALY_BF527 0 30# define ANOMALY_BF527 0
26#endif 31#endif
27 32
28/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */ 33#define _ANOMALY_BF526(rev526) (ANOMALY_BF526 && __SILICON_REVISION__ rev526)
34#define _ANOMALY_BF527(rev527) (ANOMALY_BF527 && __SILICON_REVISION__ rev527)
35#define _ANOMALY_BF526_BF527(rev526, rev527) (_ANOMALY_BF526(rev526) || _ANOMALY_BF527(rev527))
36
37/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
29#define ANOMALY_05000074 (1) 38#define ANOMALY_05000074 (1)
30/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */ 39/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
31#define ANOMALY_05000119 (1) /* note: brokenness is noted in documentation, not anomaly sheet */ 40#define ANOMALY_05000119 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
32/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 41/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
33#define ANOMALY_05000122 (1) 42#define ANOMALY_05000122 (1)
34/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */ 43/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
35#define ANOMALY_05000245 (1) 44#define ANOMALY_05000245 (1)
45/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
46#define ANOMALY_05000254 (1)
36/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ 47/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
37#define ANOMALY_05000265 (1) 48#define ANOMALY_05000265 (1)
38/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ 49/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
39#define ANOMALY_05000310 (1) 50#define ANOMALY_05000310 (1)
40/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */ 51/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
41#define ANOMALY_05000313 (__SILICON_REVISION__ < 2) 52#define ANOMALY_05000313 (_ANOMALY_BF526_BF527(< 1, < 2))
42/* Incorrect Access of OTP_STATUS During otp_write() Function */ 53/* Incorrect Access of OTP_STATUS During otp_write() Function */
43#define ANOMALY_05000328 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 54#define ANOMALY_05000328 (_ANOMALY_BF527(< 2))
55/* Host DMA Boot Modes Are Not Functional */
56#define ANOMALY_05000330 (__SILICON_REVISION__ < 2)
44/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */ 57/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
45#define ANOMALY_05000337 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 58#define ANOMALY_05000337 (_ANOMALY_BF527(< 2))
46/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */ 59/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
47#define ANOMALY_05000341 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 60#define ANOMALY_05000341 (_ANOMALY_BF527(< 2))
48/* TWI May Not Operate Correctly Under Certain Signal Termination Conditions */ 61/* TWI May Not Operate Correctly Under Certain Signal Termination Conditions */
49#define ANOMALY_05000342 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 62#define ANOMALY_05000342 (_ANOMALY_BF527(< 2))
50/* USB Calibration Value Is Not Initialized */ 63/* USB Calibration Value Is Not Initialized */
51#define ANOMALY_05000346 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 64#define ANOMALY_05000346 (_ANOMALY_BF526_BF527(< 1, < 2))
52/* USB Calibration Value to use */ 65/* USB Calibration Value to use */
53#define ANOMALY_05000346_value 0xE510 66#define ANOMALY_05000346_value 0xE510
54/* Preboot Routine Incorrectly Alters Reset Value of USB Register */ 67/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
55#define ANOMALY_05000347 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 68#define ANOMALY_05000347 (_ANOMALY_BF527(< 2))
56/* Security Features Are Not Functional */ 69/* Security Features Are Not Functional */
57#define ANOMALY_05000348 (ANOMALY_BF527 && __SILICON_REVISION__ < 1) 70#define ANOMALY_05000348 (_ANOMALY_BF527(< 1))
58/* bfrom_SysControl() Firmware Function Performs Improper System Reset */ 71/* bfrom_SysControl() Firmware Function Performs Improper System Reset */
59#define ANOMALY_05000353 (ANOMALY_BF526) 72#define ANOMALY_05000353 (_ANOMALY_BF526(< 1))
60/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */ 73/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
61#define ANOMALY_05000355 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 74#define ANOMALY_05000355 (_ANOMALY_BF527(< 2))
62/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ 75/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
63#define ANOMALY_05000357 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 76#define ANOMALY_05000357 (_ANOMALY_BF527(< 2))
64/* Incorrect Revision Number in DSPID Register */ 77/* Incorrect Revision Number in DSPID Register */
65#define ANOMALY_05000364 (ANOMALY_BF527 && __SILICON_REVISION__ == 1) 78#define ANOMALY_05000364 (_ANOMALY_BF527(== 1))
66/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */ 79/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */
67#define ANOMALY_05000366 (1) 80#define ANOMALY_05000366 (1)
68/* Incorrect Default CSEL Value in PLL_DIV */ 81/* Incorrect Default CSEL Value in PLL_DIV */
69#define ANOMALY_05000368 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 82#define ANOMALY_05000368 (_ANOMALY_BF527(< 2))
70/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */ 83/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
71#define ANOMALY_05000371 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 84#define ANOMALY_05000371 (_ANOMALY_BF527(< 2))
72/* Authentication Fails To Initiate */ 85/* Authentication Fails To Initiate */
73#define ANOMALY_05000376 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 86#define ANOMALY_05000376 (_ANOMALY_BF527(< 2))
74/* Data Read From L3 Memory by USB DMA May be Corrupted */ 87/* Data Read From L3 Memory by USB DMA May be Corrupted */
75#define ANOMALY_05000380 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 88#define ANOMALY_05000380 (_ANOMALY_BF527(< 2))
76/* 8-Bit NAND Flash Boot Mode Not Functional */ 89/* 8-Bit NAND Flash Boot Mode Not Functional */
77#define ANOMALY_05000382 (__SILICON_REVISION__ < 2) 90#define ANOMALY_05000382 (_ANOMALY_BF526_BF527(< 1, < 2))
78/* Host Must Not Read Back During Host DMA Boot */
79#define ANOMALY_05000384 (ANOMALY_BF527 && __SILICON_REVISION__ < 2)
80/* Boot from OTP Memory Not Functional */ 91/* Boot from OTP Memory Not Functional */
81#define ANOMALY_05000385 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 92#define ANOMALY_05000385 (_ANOMALY_BF527(< 2))
82/* bfrom_SysControl() Firmware Routine Not Functional */ 93/* bfrom_SysControl() Firmware Routine Not Functional */
83#define ANOMALY_05000386 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 94#define ANOMALY_05000386 (_ANOMALY_BF527(< 2))
84/* Programmable Preboot Settings Not Functional */ 95/* Programmable Preboot Settings Not Functional */
85#define ANOMALY_05000387 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 96#define ANOMALY_05000387 (_ANOMALY_BF527(< 2))
86/* CRC32 Checksum Support Not Functional */ 97/* CRC32 Checksum Support Not Functional */
87#define ANOMALY_05000388 (__SILICON_REVISION__ < 2) 98#define ANOMALY_05000388 (_ANOMALY_BF526_BF527(< 1, < 2))
88/* Reset Vector Must Not Be in SDRAM Memory Space */ 99/* Reset Vector Must Not Be in SDRAM Memory Space */
89#define ANOMALY_05000389 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 100#define ANOMALY_05000389 (_ANOMALY_BF527(< 2))
90/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */ 101/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
91#define ANOMALY_05000392 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 102#define ANOMALY_05000392 (_ANOMALY_BF527(< 2))
92/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */ 103/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
93#define ANOMALY_05000393 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 104#define ANOMALY_05000393 (_ANOMALY_BF527(< 2))
94/* Log Buffer Not Functional */ 105/* Log Buffer Not Functional */
95#define ANOMALY_05000394 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 106#define ANOMALY_05000394 (_ANOMALY_BF527(< 2))
96/* Hook Routine Not Functional */ 107/* Hook Routine Not Functional */
97#define ANOMALY_05000395 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 108#define ANOMALY_05000395 (_ANOMALY_BF527(< 2))
98/* Header Indirect Bit Not Functional */ 109/* Header Indirect Bit Not Functional */
99#define ANOMALY_05000396 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 110#define ANOMALY_05000396 (_ANOMALY_BF527(< 2))
100/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */ 111/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
101#define ANOMALY_05000397 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 112#define ANOMALY_05000397 (_ANOMALY_BF527(< 2))
102/* SWRESET, DFRESET and WDRESET Bits in the SYSCR Register Not Functional */ 113/* SWRESET, DFRESET and WDRESET Bits in the SYSCR Register Not Functional */
103#define ANOMALY_05000398 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 114#define ANOMALY_05000398 (_ANOMALY_BF527(< 2))
104/* BCODE_NOBOOT in BCODE Field of SYSCR Register Not Functional */ 115/* BCODE_NOBOOT in BCODE Field of SYSCR Register Not Functional */
105#define ANOMALY_05000399 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 116#define ANOMALY_05000399 (_ANOMALY_BF527(< 2))
106/* PPI Data Signals D0 and D8 do not Tristate After Disabling PPI */ 117/* PPI Data Signals D0 and D8 do not Tristate After Disabling PPI */
107#define ANOMALY_05000401 (__SILICON_REVISION__ < 2) 118#define ANOMALY_05000401 (_ANOMALY_BF526_BF527(< 1, < 2))
108/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */ 119/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
109#define ANOMALY_05000403 (__SILICON_REVISION__ < 2) 120#define ANOMALY_05000403 (_ANOMALY_BF526_BF527(< 1, < 2))
110/* Lockbox SESR Disallows Certain User Interrupts */ 121/* Lockbox SESR Disallows Certain User Interrupts */
111#define ANOMALY_05000404 (__SILICON_REVISION__ < 2) 122#define ANOMALY_05000404 (_ANOMALY_BF526_BF527(< 1, < 2))
112/* Lockbox SESR Firmware Does Not Save/Restore Full Context */ 123/* Lockbox SESR Firmware Does Not Save/Restore Full Context */
113#define ANOMALY_05000405 (1) 124#define ANOMALY_05000405 (1)
114/* Lockbox SESR Firmware Arguments Are Not Retained After First Initialization */ 125/* Lockbox SESR Firmware Arguments Are Not Retained After First Initialization */
115#define ANOMALY_05000407 (__SILICON_REVISION__ < 2) 126#define ANOMALY_05000407 (_ANOMALY_BF526_BF527(< 1, < 2))
116/* Lockbox Firmware Memory Cleanup Routine Does not Clear Registers */ 127/* Lockbox Firmware Memory Cleanup Routine Does not Clear Registers */
117#define ANOMALY_05000408 (1) 128#define ANOMALY_05000408 (1)
118/* Lockbox firmware leaves MDMA0 channel enabled */ 129/* Lockbox firmware leaves MDMA0 channel enabled */
119#define ANOMALY_05000409 (__SILICON_REVISION__ < 2) 130#define ANOMALY_05000409 (_ANOMALY_BF526_BF527(< 1, < 2))
120/* Incorrect Default Internal Voltage Regulator Setting */ 131/* Incorrect Default Internal Voltage Regulator Setting */
121#define ANOMALY_05000410 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 132#define ANOMALY_05000410 (_ANOMALY_BF527(< 2))
122/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */ 133/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */
123#define ANOMALY_05000411 (__SILICON_REVISION__ < 2) 134#define ANOMALY_05000411 (_ANOMALY_BF526_BF527(< 1, < 2))
124/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */ 135/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */
125#define ANOMALY_05000414 (__SILICON_REVISION__ < 2) 136#define ANOMALY_05000414 (_ANOMALY_BF526_BF527(< 1, < 2))
126/* DEB2_URGENT Bit Not Functional */ 137/* DEB2_URGENT Bit Not Functional */
127#define ANOMALY_05000415 (__SILICON_REVISION__ < 2) 138#define ANOMALY_05000415 (_ANOMALY_BF526_BF527(< 1, < 2))
128/* Speculative Fetches Can Cause Undesired External FIFO Operations */ 139/* Speculative Fetches Can Cause Undesired External FIFO Operations */
129#define ANOMALY_05000416 (1) 140#define ANOMALY_05000416 (1)
130/* SPORT0 Ignores External TSCLK0 on PG14 When TMR6 is an Output */ 141/* SPORT0 Ignores External TSCLK0 on PG14 When TMR6 is an Output */
131#define ANOMALY_05000417 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 142#define ANOMALY_05000417 (_ANOMALY_BF527(< 2))
132/* tSFSPE and tHFSPE Do Not Meet Data Sheet Specifications */ 143/* PPI Timing Requirements tSFSPE and tHFSPE Do Not Meet Data Sheet Specifications */
133#define ANOMALY_05000418 (__SILICON_REVISION__ < 2) 144#define ANOMALY_05000418 (_ANOMALY_BF526_BF527(< 1, < 2))
134/* USB PLL_STABLE Bit May Not Accurately Reflect the USB PLL's Status */ 145/* USB PLL_STABLE Bit May Not Accurately Reflect the USB PLL's Status */
135#define ANOMALY_05000420 (__SILICON_REVISION__ < 2) 146#define ANOMALY_05000420 (_ANOMALY_BF526_BF527(< 1, < 2))
136/* TWI Fall Time (Tof) May Violate the Minimum I2C Specification */ 147/* TWI Fall Time (Tof) May Violate the Minimum I2C Specification */
137#define ANOMALY_05000421 (1) 148#define ANOMALY_05000421 (1)
138/* TWI Input Capacitance (Ci) May Violate the Maximum I2C Specification */ 149/* TWI Input Capacitance (Ci) May Violate the Maximum I2C Specification */
139#define ANOMALY_05000422 (ANOMALY_BF527 && __SILICON_REVISION__ > 1) 150#define ANOMALY_05000422 (_ANOMALY_BF526_BF527(> 0, > 1))
140/* Certain Ethernet Frames With Errors are Misclassified in RMII Mode */ 151/* Certain Ethernet Frames With Errors are Misclassified in RMII Mode */
141#define ANOMALY_05000423 (__SILICON_REVISION__ < 2) 152#define ANOMALY_05000423 (_ANOMALY_BF526_BF527(< 1, < 2))
142/* Internal Voltage Regulator Not Trimmed */ 153/* Internal Voltage Regulator Not Trimmed */
143#define ANOMALY_05000424 (ANOMALY_BF527 && __SILICON_REVISION__ < 2) 154#define ANOMALY_05000424 (_ANOMALY_BF527(< 2))
144/* Multichannel SPORT Channel Misalignment Under Specific Configuration */ 155/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
145#define ANOMALY_05000425 (__SILICON_REVISION__ < 2) 156#define ANOMALY_05000425 (_ANOMALY_BF526_BF527(< 1, < 2))
146/* Speculative Fetches of Indirect-Pointer Instructions Can Cause Spurious Hardware Errors */ 157/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
147#define ANOMALY_05000426 (1) 158#define ANOMALY_05000426 (1)
148/* WB_EDGE Bit in NFC_IRQSTAT Incorrectly Reflects Buffer Status Instead of IRQ Status */ 159/* WB_EDGE Bit in NFC_IRQSTAT Incorrectly Reflects Buffer Status Instead of IRQ Status */
149#define ANOMALY_05000429 (__SILICON_REVISION__ < 2) 160#define ANOMALY_05000429 (_ANOMALY_BF526_BF527(< 1, < 2))
150/* Software System Reset Corrupts PLL_LOCKCNT Register */ 161/* Software System Reset Corrupts PLL_LOCKCNT Register */
151#define ANOMALY_05000430 (ANOMALY_BF527 && __SILICON_REVISION__ > 1) 162#define ANOMALY_05000430 (_ANOMALY_BF527(> 1))
163/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
164#define ANOMALY_05000431 (1)
152/* bfrom_SysControl() Does Not Clear SIC_IWR1 Before Executing PLL Programming Sequence */ 165/* bfrom_SysControl() Does Not Clear SIC_IWR1 Before Executing PLL Programming Sequence */
153#define ANOMALY_05000432 (ANOMALY_BF526) 166#define ANOMALY_05000432 (_ANOMALY_BF526(< 1))
154/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */ 167/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
155#define ANOMALY_05000435 ((ANOMALY_BF526 && __SILICON_REVISION__ < 1) || ANOMALY_BF527) 168#define ANOMALY_05000435 (_ANOMALY_BF526_BF527(< 1, >= 0))
169/* Preboot Cannot be Used to Alter the PLL_DIV Register */
170#define ANOMALY_05000439 (_ANOMALY_BF526_BF527(< 1, >= 0))
171/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
172#define ANOMALY_05000440 (_ANOMALY_BF526_BF527(< 1, >= 0))
173/* OTP Write Accesses Not Supported */
174#define ANOMALY_05000442 (_ANOMALY_BF527(< 1))
156/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 175/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
157#define ANOMALY_05000443 (1) 176#define ANOMALY_05000443 (1)
177/* The WURESET Bit in the SYSCR Register is not Functional */
178#define ANOMALY_05000445 (1)
179/* BCODE_QUICKBOOT, BCODE_ALLBOOT, and BCODE_FULLBOOT Settings in SYSCR Register Not Functional */
180#define ANOMALY_05000451 (1)
181/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
182#define ANOMALY_05000452 (_ANOMALY_BF526_BF527(< 1, >= 0))
183/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
184#define ANOMALY_05000456 (1)
185/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
186#define ANOMALY_05000457 (1)
187/* False Hardware Error when RETI points to invalid memory */
188#define ANOMALY_05000461 (1)
158 189
159/* Anomalies that don't exist on this proc */ 190/* Anomalies that don't exist on this proc */
191#define ANOMALY_05000099 (0)
192#define ANOMALY_05000120 (0)
160#define ANOMALY_05000125 (0) 193#define ANOMALY_05000125 (0)
194#define ANOMALY_05000149 (0)
161#define ANOMALY_05000158 (0) 195#define ANOMALY_05000158 (0)
196#define ANOMALY_05000171 (0)
197#define ANOMALY_05000179 (0)
162#define ANOMALY_05000183 (0) 198#define ANOMALY_05000183 (0)
163#define ANOMALY_05000198 (0) 199#define ANOMALY_05000198 (0)
200#define ANOMALY_05000215 (0)
201#define ANOMALY_05000220 (0)
202#define ANOMALY_05000227 (0)
164#define ANOMALY_05000230 (0) 203#define ANOMALY_05000230 (0)
204#define ANOMALY_05000231 (0)
205#define ANOMALY_05000233 (0)
206#define ANOMALY_05000242 (0)
165#define ANOMALY_05000244 (0) 207#define ANOMALY_05000244 (0)
208#define ANOMALY_05000248 (0)
209#define ANOMALY_05000250 (0)
166#define ANOMALY_05000261 (0) 210#define ANOMALY_05000261 (0)
167#define ANOMALY_05000263 (0) 211#define ANOMALY_05000263 (0)
168#define ANOMALY_05000266 (0) 212#define ANOMALY_05000266 (0)
169#define ANOMALY_05000273 (0) 213#define ANOMALY_05000273 (0)
214#define ANOMALY_05000274 (0)
170#define ANOMALY_05000278 (0) 215#define ANOMALY_05000278 (0)
171#define ANOMALY_05000285 (0) 216#define ANOMALY_05000285 (0)
217#define ANOMALY_05000287 (0)
218#define ANOMALY_05000301 (0)
172#define ANOMALY_05000305 (0) 219#define ANOMALY_05000305 (0)
173#define ANOMALY_05000307 (0) 220#define ANOMALY_05000307 (0)
174#define ANOMALY_05000311 (0) 221#define ANOMALY_05000311 (0)
175#define ANOMALY_05000312 (0) 222#define ANOMALY_05000312 (0)
176#define ANOMALY_05000323 (0) 223#define ANOMALY_05000323 (0)
224#define ANOMALY_05000362 (1)
177#define ANOMALY_05000363 (0) 225#define ANOMALY_05000363 (0)
226#define ANOMALY_05000400 (0)
178#define ANOMALY_05000412 (0) 227#define ANOMALY_05000412 (0)
179#define ANOMALY_05000447 (0) 228#define ANOMALY_05000447 (0)
180#define ANOMALY_05000448 (0) 229#define ANOMALY_05000448 (0)
230#define ANOMALY_05000450 (0)
181 231
182#endif 232#endif
diff --git a/arch/blackfin/mach-bf533/Kconfig b/arch/blackfin/mach-bf533/Kconfig
index 14427de7d77f..4c572443147e 100644
--- a/arch/blackfin/mach-bf533/Kconfig
+++ b/arch/blackfin/mach-bf533/Kconfig
@@ -59,6 +59,7 @@ config DMA7_UARTTX
59 default 10 59 default 10
60config TIMER0 60config TIMER0
61 int "TIMER0" 61 int "TIMER0"
62 default 7 if TICKSOURCE_GPTMR0
62 default 8 63 default 8
63config TIMER1 64config TIMER1
64 int "TIMER1" 65 int "TIMER1"
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 0c66bf44cfab..895f213ea454 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -173,7 +173,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
173}; 173};
174#endif 174#endif
175 175
176#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 176#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
177/* SPI ADC chip */ 177/* SPI ADC chip */
178static struct bfin5xx_spi_chip spi_adc_chip_info = { 178static struct bfin5xx_spi_chip spi_adc_chip_info = {
179 .ctl_reg = 0x1000, 179 .ctl_reg = 0x1000,
@@ -216,7 +216,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
216 }, 216 },
217#endif 217#endif
218 218
219#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 219#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
220 { 220 {
221 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 221 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
222 .max_speed_hz = 4, /* actual baudrate is SCLK/(2xspeed_hz) */ 222 .max_speed_hz = 4, /* actual baudrate is SCLK/(2xspeed_hz) */
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index e8974878d8c2..a727e538fa28 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -82,7 +82,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
82#endif 82#endif
83 83
84/* SPI ADC chip */ 84/* SPI ADC chip */
85#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 85#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
86static struct bfin5xx_spi_chip spi_adc_chip_info = { 86static struct bfin5xx_spi_chip spi_adc_chip_info = {
87 .enable_dma = 1, /* use dma transfer with this chip*/ 87 .enable_dma = 1, /* use dma transfer with this chip*/
88 .bits_per_word = 16, 88 .bits_per_word = 16,
@@ -117,7 +117,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
117 }, 117 },
118#endif 118#endif
119 119
120#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 120#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
121 { 121 {
122 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 122 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
123 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 123 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 08cd0969de47..842f1c9c2393 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -118,7 +118,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
118}; 118};
119#endif 119#endif
120 120
121#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 121#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
122/* SPI ADC chip */ 122/* SPI ADC chip */
123static struct bfin5xx_spi_chip spi_adc_chip_info = { 123static struct bfin5xx_spi_chip spi_adc_chip_info = {
124 .enable_dma = 1, /* use dma transfer with this chip*/ 124 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -154,7 +154,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
154 }, 154 },
155#endif 155#endif
156 156
157#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 157#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
158 { 158 {
159 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 159 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
160 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 160 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index db96f33f72e2..e19c565ade16 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -192,7 +192,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
192}; 192};
193#endif 193#endif
194 194
195#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 195#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
196/* SPI ADC chip */ 196/* SPI ADC chip */
197static struct bfin5xx_spi_chip spi_adc_chip_info = { 197static struct bfin5xx_spi_chip spi_adc_chip_info = {
198 .enable_dma = 1, /* use dma transfer with this chip*/ 198 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -237,7 +237,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
237 }, 237 },
238#endif 238#endif
239 239
240#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 240#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
241 { 241 {
242 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 242 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
243 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 243 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -448,7 +448,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
448 .irq = 39, 448 .irq = 39,
449 }, 449 },
450#endif 450#endif
451#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 451#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
452 { 452 {
453 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 453 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
454 }, 454 },
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 1cf893e2e55b..31145b509e20 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -6,7 +6,7 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision E, 09/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List 10 * - Revision E, 09/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
11 */ 11 */
12 12
@@ -34,12 +34,12 @@
34# define ANOMALY_BF533 0 34# define ANOMALY_BF533 0
35#endif 35#endif
36 36
37/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot 2 Not Supported */ 37/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
38#define ANOMALY_05000074 (1) 38#define ANOMALY_05000074 (1)
39/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */ 39/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
40#define ANOMALY_05000099 (__SILICON_REVISION__ < 5) 40#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
41/* Watchpoint Status Register (WPSTAT) Bits Are Set on Every Corresponding Match */ 41/* Watchpoint Status Register (WPSTAT) Bits Are Set on Every Corresponding Match */
42#define ANOMALY_05000105 (1) 42#define ANOMALY_05000105 (__SILICON_REVISION__ > 2)
43/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */ 43/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
44#define ANOMALY_05000119 (1) 44#define ANOMALY_05000119 (1)
45/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 45/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
@@ -48,7 +48,7 @@
48#define ANOMALY_05000158 (__SILICON_REVISION__ < 5) 48#define ANOMALY_05000158 (__SILICON_REVISION__ < 5)
49/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */ 49/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
50#define ANOMALY_05000166 (1) 50#define ANOMALY_05000166 (1)
51/* Turning Serial Ports on with External Frame Syncs */ 51/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
52#define ANOMALY_05000167 (1) 52#define ANOMALY_05000167 (1)
53/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */ 53/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
54#define ANOMALY_05000179 (__SILICON_REVISION__ < 5) 54#define ANOMALY_05000179 (__SILICON_REVISION__ < 5)
@@ -67,9 +67,9 @@
67/* Current DMA Address Shows Wrong Value During Carry Fix */ 67/* Current DMA Address Shows Wrong Value During Carry Fix */
68#define ANOMALY_05000199 (__SILICON_REVISION__ < 4) 68#define ANOMALY_05000199 (__SILICON_REVISION__ < 4)
69/* SPORT TFS and DT Are Incorrectly Driven During Inactive Channels in Certain Conditions */ 69/* SPORT TFS and DT Are Incorrectly Driven During Inactive Channels in Certain Conditions */
70#define ANOMALY_05000200 (__SILICON_REVISION__ < 5) 70#define ANOMALY_05000200 (__SILICON_REVISION__ == 3 || __SILICON_REVISION__ == 4)
71/* Receive Frame Sync Not Ignored During Active Frames in SPORT Multi-Channel Mode */ 71/* Receive Frame Sync Not Ignored During Active Frames in SPORT Multi-Channel Mode */
72#define ANOMALY_05000201 (__SILICON_REVISION__ < 4) 72#define ANOMALY_05000201 (__SILICON_REVISION__ == 3)
73/* Possible Infinite Stall with Specific Dual-DAG Situation */ 73/* Possible Infinite Stall with Specific Dual-DAG Situation */
74#define ANOMALY_05000202 (__SILICON_REVISION__ < 5) 74#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
75/* Specific Sequence That Can Cause DMA Error or DMA Stopping */ 75/* Specific Sequence That Can Cause DMA Error or DMA Stopping */
@@ -104,7 +104,7 @@
104#define ANOMALY_05000242 (__SILICON_REVISION__ < 5) 104#define ANOMALY_05000242 (__SILICON_REVISION__ < 5)
105/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */ 105/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
106#define ANOMALY_05000244 (__SILICON_REVISION__ < 5) 106#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
107/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */ 107/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
108#define ANOMALY_05000245 (1) 108#define ANOMALY_05000245 (1)
109/* Data CPLBs Should Prevent Spurious Hardware Errors */ 109/* Data CPLBs Should Prevent Spurious Hardware Errors */
110#define ANOMALY_05000246 (__SILICON_REVISION__ < 5) 110#define ANOMALY_05000246 (__SILICON_REVISION__ < 5)
@@ -137,7 +137,7 @@
137/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */ 137/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
138#define ANOMALY_05000270 (__SILICON_REVISION__ < 5) 138#define ANOMALY_05000270 (__SILICON_REVISION__ < 5)
139/* Spontaneous Reset of Internal Voltage Regulator */ 139/* Spontaneous Reset of Internal Voltage Regulator */
140#define ANOMALY_05000271 (__SILICON_REVISION__ < 4) 140#define ANOMALY_05000271 (__SILICON_REVISION__ == 3)
141/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */ 141/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
142#define ANOMALY_05000272 (1) 142#define ANOMALY_05000272 (1)
143/* Writes to Synchronous SDRAM Memory May Be Lost */ 143/* Writes to Synchronous SDRAM Memory May Be Lost */
@@ -165,14 +165,14 @@
165/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */ 165/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
166#define ANOMALY_05000306 (__SILICON_REVISION__ < 5) 166#define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
167/* SCKELOW Bit Does Not Maintain State Through Hibernate */ 167/* SCKELOW Bit Does Not Maintain State Through Hibernate */
168#define ANOMALY_05000307 (1) 168#define ANOMALY_05000307 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
169/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ 169/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
170#define ANOMALY_05000310 (1) 170#define ANOMALY_05000310 (1)
171/* Erroneous Flag (GPIO) Pin Operations under Specific Sequences */ 171/* Erroneous Flag (GPIO) Pin Operations under Specific Sequences */
172#define ANOMALY_05000311 (__SILICON_REVISION__ < 6) 172#define ANOMALY_05000311 (__SILICON_REVISION__ < 6)
173/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ 173/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
174#define ANOMALY_05000312 (__SILICON_REVISION__ < 6) 174#define ANOMALY_05000312 (__SILICON_REVISION__ < 6)
175/* PPI Is Level-Sensitive on First Transfer */ 175/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
176#define ANOMALY_05000313 (__SILICON_REVISION__ < 6) 176#define ANOMALY_05000313 (__SILICON_REVISION__ < 6)
177/* Killed System MMR Write Completes Erroneously On Next System MMR Access */ 177/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
178#define ANOMALY_05000315 (__SILICON_REVISION__ < 6) 178#define ANOMALY_05000315 (__SILICON_REVISION__ < 6)
@@ -200,17 +200,63 @@
200#define ANOMALY_05000426 (1) 200#define ANOMALY_05000426 (1)
201/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 201/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
202#define ANOMALY_05000443 (1) 202#define ANOMALY_05000443 (1)
203/* False Hardware Error when RETI points to invalid memory */
204#define ANOMALY_05000461 (1)
203 205
204/* These anomalies have been "phased" out of analog.com anomaly sheets and are 206/* These anomalies have been "phased" out of analog.com anomaly sheets and are
205 * here to show running on older silicon just isn't feasible. 207 * here to show running on older silicon just isn't feasible.
206 */ 208 */
207 209
210/* Internal voltage regulator can't be modified via register writes */
211#define ANOMALY_05000066 (__SILICON_REVISION__ < 2)
208/* Watchpoints (Hardware Breakpoints) are not supported */ 212/* Watchpoints (Hardware Breakpoints) are not supported */
209#define ANOMALY_05000067 (__SILICON_REVISION__ < 3) 213#define ANOMALY_05000067 (__SILICON_REVISION__ < 3)
214/* SDRAM PSSE bit cannot be set again after SDRAM Powerup */
215#define ANOMALY_05000070 (__SILICON_REVISION__ < 2)
216/* Writing FIO_DIR can corrupt a programmable flag's data */
217#define ANOMALY_05000079 (__SILICON_REVISION__ < 2)
218/* Timer Auto-Baud Mode requires the UART clock to be enabled */
219#define ANOMALY_05000086 (__SILICON_REVISION__ < 2)
220/* Internal Clocking Modes on SPORT0 not supported */
221#define ANOMALY_05000088 (__SILICON_REVISION__ < 2)
222/* Internal voltage regulator does not wake up from an RTC wakeup */
223#define ANOMALY_05000092 (__SILICON_REVISION__ < 2)
224/* The IFLUSH instruction must be preceded by a CSYNC instruction */
225#define ANOMALY_05000093 (__SILICON_REVISION__ < 2)
226/* Vectoring to an instruction that is presently being filled into the instruction cache may cause erroneous behavior */
227#define ANOMALY_05000095 (__SILICON_REVISION__ < 2)
228/* PREFETCH, FLUSH, and FLUSHINV must be followed by a CSYNC */
229#define ANOMALY_05000096 (__SILICON_REVISION__ < 2)
230/* Performance Monitor 0 and 1 are swapped when monitoring memory events */
231#define ANOMALY_05000097 (__SILICON_REVISION__ < 2)
232/* 32-bit SPORT DMA will be word reversed */
233#define ANOMALY_05000098 (__SILICON_REVISION__ < 2)
234/* Incorrect status in the UART_IIR register */
235#define ANOMALY_05000100 (__SILICON_REVISION__ < 2)
236/* Reading X_MODIFY or Y_MODIFY while DMA channel is active */
237#define ANOMALY_05000101 (__SILICON_REVISION__ < 2)
238/* Descriptor-based MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
239#define ANOMALY_05000102 (__SILICON_REVISION__ < 2)
240/* Incorrect value written to the cycle counters */
241#define ANOMALY_05000103 (__SILICON_REVISION__ < 2)
242/* Stores to L1 Data memory incorrect when a specific sequence is followed */
243#define ANOMALY_05000104 (__SILICON_REVISION__ < 2)
244/* Programmable Flag (PF3) functionality not supported in all PPI modes */
245#define ANOMALY_05000106 (__SILICON_REVISION__ < 2)
246/* Data store can be lost when targeting a cache line fill */
247#define ANOMALY_05000107 (__SILICON_REVISION__ < 2)
210/* Reserved bits in SYSCFG register not set at power on */ 248/* Reserved bits in SYSCFG register not set at power on */
211#define ANOMALY_05000109 (__SILICON_REVISION__ < 3) 249#define ANOMALY_05000109 (__SILICON_REVISION__ < 3)
250/* Infinite Core Stall */
251#define ANOMALY_05000114 (__SILICON_REVISION__ < 2)
252/* PPI_FSx may glitch when generated by the on chip Timers */
253#define ANOMALY_05000115 (__SILICON_REVISION__ < 2)
212/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */ 254/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
213#define ANOMALY_05000116 (__SILICON_REVISION__ < 3) 255#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
256/* DTEST registers allow access to Data Cache when DTEST_COMMAND< 14 >= 0 */
257#define ANOMALY_05000117 (__SILICON_REVISION__ < 2)
258/* Booting from an 8-bit or 24-bit Addressable SPI device is not supported */
259#define ANOMALY_05000118 (__SILICON_REVISION__ < 2)
214/* DTEST_COMMAND initiated memory access may be incorrect if data cache or DMA is active */ 260/* DTEST_COMMAND initiated memory access may be incorrect if data cache or DMA is active */
215#define ANOMALY_05000123 (__SILICON_REVISION__ < 3) 261#define ANOMALY_05000123 (__SILICON_REVISION__ < 3)
216/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */ 262/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
@@ -222,7 +268,9 @@
222/* DMEM_CONTROL is not set on Reset */ 268/* DMEM_CONTROL is not set on Reset */
223#define ANOMALY_05000137 (__SILICON_REVISION__ < 3) 269#define ANOMALY_05000137 (__SILICON_REVISION__ < 3)
224/* SPI boot will not complete if there is a zero fill block in the loader file */ 270/* SPI boot will not complete if there is a zero fill block in the loader file */
225#define ANOMALY_05000138 (__SILICON_REVISION__ < 3) 271#define ANOMALY_05000138 (__SILICON_REVISION__ == 2)
272/* Timerx_Config must be set for using the PPI in GP output mode with internal Frame Syncs */
273#define ANOMALY_05000139 (__SILICON_REVISION__ < 2)
226/* Allowing the SPORT RX FIFO to fill will cause an overflow */ 274/* Allowing the SPORT RX FIFO to fill will cause an overflow */
227#define ANOMALY_05000140 (__SILICON_REVISION__ < 3) 275#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
228/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */ 276/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
@@ -237,17 +285,17 @@
237#define ANOMALY_05000145 (__SILICON_REVISION__ < 3) 285#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
238/* MDMA may lose the first few words of a descriptor chain */ 286/* MDMA may lose the first few words of a descriptor chain */
239#define ANOMALY_05000146 (__SILICON_REVISION__ < 3) 287#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
240/* The source MDMA descriptor may stop with a DMA Error */ 288/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
241#define ANOMALY_05000147 (__SILICON_REVISION__ < 3) 289#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
242/* When booting from a 16-bit asynchronous memory device, the upper 8-bits of each word must be 0x00 */ 290/* When booting from a 16-bit asynchronous memory device, the upper 8-bits of each word must be 0x00 */
243#define ANOMALY_05000148 (__SILICON_REVISION__ < 3) 291#define ANOMALY_05000148 (__SILICON_REVISION__ < 3)
244/* Frame Delay in SPORT Multichannel Mode */ 292/* Frame Delay in SPORT Multichannel Mode */
245#define ANOMALY_05000153 (__SILICON_REVISION__ < 3) 293#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
246/* SPORT TFS signal is active in Multi-channel mode outside of valid channels */ 294/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
247#define ANOMALY_05000154 (__SILICON_REVISION__ < 3) 295#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
248/* Timer1 can not be used for PWMOUT mode when a certain PPI mode is in use */ 296/* Timer1 can not be used for PWMOUT mode when a certain PPI mode is in use */
249#define ANOMALY_05000155 (__SILICON_REVISION__ < 3) 297#define ANOMALY_05000155 (__SILICON_REVISION__ < 3)
250/* A killed 32-bit System MMR write will lead to the next system MMR access thinking it should be 32-bit. */ 298/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
251#define ANOMALY_05000157 (__SILICON_REVISION__ < 3) 299#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
252/* SPORT transmit data is not gated by external frame sync in certain conditions */ 300/* SPORT transmit data is not gated by external frame sync in certain conditions */
253#define ANOMALY_05000163 (__SILICON_REVISION__ < 3) 301#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
@@ -275,15 +323,27 @@
275#define ANOMALY_05000206 (__SILICON_REVISION__ < 3) 323#define ANOMALY_05000206 (__SILICON_REVISION__ < 3)
276 324
277/* Anomalies that don't exist on this proc */ 325/* Anomalies that don't exist on this proc */
326#define ANOMALY_05000120 (0)
327#define ANOMALY_05000149 (0)
328#define ANOMALY_05000171 (0)
329#define ANOMALY_05000220 (0)
330#define ANOMALY_05000248 (0)
278#define ANOMALY_05000266 (0) 331#define ANOMALY_05000266 (0)
332#define ANOMALY_05000274 (0)
333#define ANOMALY_05000287 (0)
279#define ANOMALY_05000323 (0) 334#define ANOMALY_05000323 (0)
280#define ANOMALY_05000353 (1) 335#define ANOMALY_05000353 (1)
336#define ANOMALY_05000362 (1)
281#define ANOMALY_05000380 (0) 337#define ANOMALY_05000380 (0)
282#define ANOMALY_05000386 (1) 338#define ANOMALY_05000386 (1)
339#define ANOMALY_05000389 (0)
283#define ANOMALY_05000412 (0) 340#define ANOMALY_05000412 (0)
341#define ANOMALY_05000430 (0)
284#define ANOMALY_05000432 (0) 342#define ANOMALY_05000432 (0)
285#define ANOMALY_05000435 (0) 343#define ANOMALY_05000435 (0)
286#define ANOMALY_05000447 (0) 344#define ANOMALY_05000447 (0)
287#define ANOMALY_05000448 (0) 345#define ANOMALY_05000448 (0)
346#define ANOMALY_05000456 (0)
347#define ANOMALY_05000450 (0)
288 348
289#endif 349#endif
diff --git a/arch/blackfin/mach-bf537/Kconfig b/arch/blackfin/mach-bf537/Kconfig
index bbc08fd4f122..d81224f9d723 100644
--- a/arch/blackfin/mach-bf537/Kconfig
+++ b/arch/blackfin/mach-bf537/Kconfig
@@ -66,6 +66,7 @@ config IRQ_MAC_TX
66 default 11 66 default 11
67config IRQ_TIMER0 67config IRQ_TIMER0
68 int "IRQ_TIMER0" 68 int "IRQ_TIMER0"
69 default 7 if TICKSOURCE_GPTMR0
69 default 8 70 default 8
70config IRQ_TIMER1 71config IRQ_TIMER1
71 int "IRQ_TIMER1" 72 int "IRQ_TIMER1"
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 41c75b9bfac0..4fee19673127 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -86,7 +86,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
86}; 86};
87#endif 87#endif
88 88
89#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 89#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
90/* SPI ADC chip */ 90/* SPI ADC chip */
91static struct bfin5xx_spi_chip spi_adc_chip_info = { 91static struct bfin5xx_spi_chip spi_adc_chip_info = {
92 .enable_dma = 1, /* use dma transfer with this chip*/ 92 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -129,7 +129,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
129 }, 129 },
130#endif 130#endif
131 131
132#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 132#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
133 { 133 {
134 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 134 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
135 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 135 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 4e1de1e53f89..26707ce39f29 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -265,8 +265,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
265}; 265};
266#endif 266#endif
267 267
268#if defined(CONFIG_SPI_ADC_BF533) \ 268#if defined(CONFIG_BFIN_SPI_ADC) \
269 || defined(CONFIG_SPI_ADC_BF533_MODULE) 269 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
270/* SPI ADC chip */ 270/* SPI ADC chip */
271static struct bfin5xx_spi_chip spi_adc_chip_info = { 271static struct bfin5xx_spi_chip spi_adc_chip_info = {
272 .enable_dma = 1, /* use dma transfer with this chip*/ 272 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -333,8 +333,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
333 }, 333 },
334#endif 334#endif
335 335
336#if defined(CONFIG_SPI_ADC_BF533) \ 336#if defined(CONFIG_BFIN_SPI_ADC) \
337 || defined(CONFIG_SPI_ADC_BF533_MODULE) 337 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
338 { 338 {
339 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 339 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
340 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 340 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 0572926da23f..dfb5036f8a6b 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -508,8 +508,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
508}; 508};
509#endif 509#endif
510 510
511#if defined(CONFIG_SPI_ADC_BF533) \ 511#if defined(CONFIG_BFIN_SPI_ADC) \
512 || defined(CONFIG_SPI_ADC_BF533_MODULE) 512 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
513/* SPI ADC chip */ 513/* SPI ADC chip */
514static struct bfin5xx_spi_chip spi_adc_chip_info = { 514static struct bfin5xx_spi_chip spi_adc_chip_info = {
515 .enable_dma = 1, /* use dma transfer with this chip*/ 515 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -607,6 +607,43 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
607}; 607};
608#endif 608#endif
609 609
610#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
611#include <linux/input.h>
612#include <linux/spi/adxl34x.h>
613static const struct adxl34x_platform_data adxl34x_info = {
614 .x_axis_offset = 0,
615 .y_axis_offset = 0,
616 .z_axis_offset = 0,
617 .tap_threshold = 0x31,
618 .tap_duration = 0x10,
619 .tap_latency = 0x60,
620 .tap_window = 0xF0,
621 .tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
622 .act_axis_control = 0xFF,
623 .activity_threshold = 5,
624 .inactivity_threshold = 3,
625 .inactivity_time = 4,
626 .free_fall_threshold = 0x7,
627 .free_fall_time = 0x20,
628 .data_rate = 0x8,
629 .data_range = ADXL_FULL_RES,
630
631 .ev_type = EV_ABS,
632 .ev_code_x = ABS_X, /* EV_REL */
633 .ev_code_y = ABS_Y, /* EV_REL */
634 .ev_code_z = ABS_Z, /* EV_REL */
635
636 .ev_code_tap_x = BTN_TOUCH, /* EV_KEY */
637 .ev_code_tap_y = BTN_TOUCH, /* EV_KEY */
638 .ev_code_tap_z = BTN_TOUCH, /* EV_KEY */
639
640/* .ev_code_ff = KEY_F,*/ /* EV_KEY */
641/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
642 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
643 .fifo_mode = ADXL_FIFO_STREAM,
644};
645#endif
646
610#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 647#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
611static struct bfin5xx_spi_chip spi_ad7879_chip_info = { 648static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
612 .enable_dma = 0, 649 .enable_dma = 0,
@@ -695,8 +732,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
695 .mode = SPI_MODE_3, 732 .mode = SPI_MODE_3,
696 }, 733 },
697#endif 734#endif
698#if defined(CONFIG_SPI_ADC_BF533) \ 735#if defined(CONFIG_BFIN_SPI_ADC) \
699 || defined(CONFIG_SPI_ADC_BF533_MODULE) 736 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
700 { 737 {
701 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 738 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
702 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 739 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
@@ -1280,7 +1317,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
1280 .irq = IRQ_PF5, 1317 .irq = IRQ_PF5,
1281 }, 1318 },
1282#endif 1319#endif
1283#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 1320#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
1284 { 1321 {
1285 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 1322 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
1286 }, 1323 },
@@ -1312,6 +1349,13 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
1312 .platform_data = (void *)&adp5520_pdev_data, 1349 .platform_data = (void *)&adp5520_pdev_data,
1313 }, 1350 },
1314#endif 1351#endif
1352#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
1353 {
1354 I2C_BOARD_INFO("adxl34x", 0x53),
1355 .irq = IRQ_PG3,
1356 .platform_data = (void *)&adxl34x_info,
1357 },
1358#endif
1315}; 1359};
1316 1360
1317#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1361#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -1358,16 +1402,18 @@ static struct resource bfin_pata_resources[] = {
1358static struct pata_platform_info bfin_pata_platform_data = { 1402static struct pata_platform_info bfin_pata_platform_data = {
1359 .ioport_shift = 0, 1403 .ioport_shift = 0,
1360}; 1404};
1361 1405/* CompactFlash Storage Card Memory Mapped Adressing
1406 * /REG = A11 = 1
1407 */
1362static struct resource bfin_pata_resources[] = { 1408static struct resource bfin_pata_resources[] = {
1363 { 1409 {
1364 .start = 0x20211820, 1410 .start = 0x20211800,
1365 .end = 0x2021183F, 1411 .end = 0x20211807,
1366 .flags = IORESOURCE_MEM, 1412 .flags = IORESOURCE_MEM,
1367 }, 1413 },
1368 { 1414 {
1369 .start = 0x2021181C, 1415 .start = 0x2021180E, /* Device Ctl */
1370 .end = 0x2021181F, 1416 .end = 0x2021180E,
1371 .flags = IORESOURCE_MEM, 1417 .flags = IORESOURCE_MEM,
1372 }, 1418 },
1373}; 1419};
@@ -1527,7 +1573,8 @@ static int __init stamp_init(void)
1527 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); 1573 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
1528 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 1574 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
1529 1575
1530#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 1576#if (defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)) \
1577 && defined(PATA_INT)
1531 irq_desc[PATA_INT].status |= IRQ_NOAUTOEN; 1578 irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
1532#endif 1579#endif
1533 1580
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 53ad10f3cd76..280574591201 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -86,7 +86,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
86}; 86};
87#endif 87#endif
88 88
89#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 89#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
90/* SPI ADC chip */ 90/* SPI ADC chip */
91static struct bfin5xx_spi_chip spi_adc_chip_info = { 91static struct bfin5xx_spi_chip spi_adc_chip_info = {
92 .enable_dma = 1, /* use dma transfer with this chip*/ 92 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -129,7 +129,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
129 }, 129 },
130#endif 130#endif
131 131
132#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 132#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
133 { 133 {
134 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 134 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
135 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 135 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 1bfd80c26c90..fc9663425465 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -6,7 +6,7 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision D, 09/18/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List 10 * - Revision D, 09/18/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
11 */ 11 */
12 12
@@ -36,77 +36,75 @@
36 36
37/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */ 37/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
38#define ANOMALY_05000074 (1) 38#define ANOMALY_05000074 (1)
39/* DMA_RUN bit is not valid after a Peripheral Receive Channel DMA stops */ 39/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
40#define ANOMALY_05000119 (1) 40#define ANOMALY_05000119 (1)
41/* Rx.H cannot be used to access 16-bit System MMR registers */ 41/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
42#define ANOMALY_05000122 (1) 42#define ANOMALY_05000122 (1)
43/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */ 43/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
44#define ANOMALY_05000157 (__SILICON_REVISION__ < 2) 44#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
45/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */ 45/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
46#define ANOMALY_05000167 (1)
47/* PPI_DELAY not functional in PPI modes with 0 frame syncs */
48#define ANOMALY_05000180 (1) 46#define ANOMALY_05000180 (1)
49/* Instruction Cache Is Not Functional */ 47/* Instruction Cache Is Not Functional */
50#define ANOMALY_05000237 (__SILICON_REVISION__ < 2) 48#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
51/* If i-cache is on, CSYNC/SSYNC/IDLE around Change of Control causes failures */ 49/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
52#define ANOMALY_05000244 (__SILICON_REVISION__ < 3) 50#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
53/* Spurious Hardware Error from an access in the shadow of a conditional branch */ 51/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
54#define ANOMALY_05000245 (1) 52#define ANOMALY_05000245 (1)
55/* CLKIN Buffer Output Enable Reset Behavior Is Changed */ 53/* CLKIN Buffer Output Enable Reset Behavior Is Changed */
56#define ANOMALY_05000247 (1) 54#define ANOMALY_05000247 (1)
57/* Incorrect Bit-Shift of Data Word in Multichannel (TDM) mode in certain conditions */ 55/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
58#define ANOMALY_05000250 (__SILICON_REVISION__ < 3) 56#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
59/* EMAC Tx DMA error after an early frame abort */ 57/* EMAC Tx DMA error after an early frame abort */
60#define ANOMALY_05000252 (__SILICON_REVISION__ < 3) 58#define ANOMALY_05000252 (__SILICON_REVISION__ < 3)
61/* Maximum external clock speed for Timers */ 59/* Maximum External Clock Speed for Timers */
62#define ANOMALY_05000253 (__SILICON_REVISION__ < 3) 60#define ANOMALY_05000253 (__SILICON_REVISION__ < 3)
63/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT mode with external clock */ 61/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
64#define ANOMALY_05000254 (__SILICON_REVISION__ > 2) 62#define ANOMALY_05000254 (__SILICON_REVISION__ > 2)
65/* Entering Hibernate Mode with RTC Seconds event interrupt not functional */ 63/* Entering Hibernate State with RTC Seconds Interrupt Not Functional */
66#define ANOMALY_05000255 (__SILICON_REVISION__ < 3) 64#define ANOMALY_05000255 (__SILICON_REVISION__ < 3)
67/* EMAC MDIO input latched on wrong MDC edge */ 65/* EMAC MDIO input latched on wrong MDC edge */
68#define ANOMALY_05000256 (__SILICON_REVISION__ < 3) 66#define ANOMALY_05000256 (__SILICON_REVISION__ < 3)
69/* Interrupt/Exception during short hardware loop may cause bad instruction fetches */ 67/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
70#define ANOMALY_05000257 (__SILICON_REVISION__ < 3) 68#define ANOMALY_05000257 (__SILICON_REVISION__ < 3)
71/* Instruction Cache is corrupted when bits 9 and 12 of the ICPLB Data registers differ */ 69/* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */
72#define ANOMALY_05000258 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ == 1) || __SILICON_REVISION__ == 2) 70#define ANOMALY_05000258 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ == 1) || __SILICON_REVISION__ == 2)
73/* ICPLB_STATUS MMR register may be corrupted */ 71/* ICPLB_STATUS MMR Register May Be Corrupted */
74#define ANOMALY_05000260 (__SILICON_REVISION__ == 2) 72#define ANOMALY_05000260 (__SILICON_REVISION__ == 2)
75/* DCPLB_FAULT_ADDR MMR register may be corrupted */ 73/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
76#define ANOMALY_05000261 (__SILICON_REVISION__ < 3) 74#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
77/* Stores to data cache may be lost */ 75/* Stores To Data Cache May Be Lost */
78#define ANOMALY_05000262 (__SILICON_REVISION__ < 3) 76#define ANOMALY_05000262 (__SILICON_REVISION__ < 3)
79/* Hardware loop corrupted when taking an ICPLB exception */ 77/* Hardware Loop Corrupted When Taking an ICPLB Exception */
80#define ANOMALY_05000263 (__SILICON_REVISION__ == 2) 78#define ANOMALY_05000263 (__SILICON_REVISION__ == 2)
81/* CSYNC/SSYNC/IDLE causes infinite stall in second to last instruction in hardware loop */ 79/* CSYNC/SSYNC/IDLE Causes Infinite Stall in Penultimate Instruction in Hardware Loop */
82#define ANOMALY_05000264 (__SILICON_REVISION__ < 3) 80#define ANOMALY_05000264 (__SILICON_REVISION__ < 3)
83/* Sensitivity to noise with slow input edge rates on external SPORT TX and RX clocks */ 81/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
84#define ANOMALY_05000265 (1) 82#define ANOMALY_05000265 (1)
85/* Memory DMA error when peripheral DMA is running with non-zero DEB_TRAFFIC_PERIOD */ 83/* Memory DMA error when peripheral DMA is running with non-zero DEB_TRAFFIC_PERIOD */
86#define ANOMALY_05000268 (__SILICON_REVISION__ < 3) 84#define ANOMALY_05000268 (__SILICON_REVISION__ < 3)
87/* High I/O activity causes output voltage of internal voltage regulator (VDDint) to decrease */ 85/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
88#define ANOMALY_05000270 (__SILICON_REVISION__ < 3) 86#define ANOMALY_05000270 (__SILICON_REVISION__ < 3)
89/* Certain data cache write through modes fail for VDDint <=0.9V */ 87/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
90#define ANOMALY_05000272 (1) 88#define ANOMALY_05000272 (1)
91/* Writes to Synchronous SDRAM memory may be lost */ 89/* Writes to Synchronous SDRAM Memory May Be Lost */
92#define ANOMALY_05000273 (__SILICON_REVISION__ < 3) 90#define ANOMALY_05000273 (__SILICON_REVISION__ < 3)
93/* Writes to an I/O data register one SCLK cycle after an edge is detected may clear interrupt */ 91/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
94#define ANOMALY_05000277 (__SILICON_REVISION__ < 3) 92#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
95/* Disabling Peripherals with DMA running may cause DMA system instability */ 93/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
96#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2)) 94#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
97/* SPI Master boot mode does not work well with Atmel Data flash devices */ 95/* SPI Master boot mode does not work well with Atmel Data flash devices */
98#define ANOMALY_05000280 (1) 96#define ANOMALY_05000280 (1)
99/* False Hardware Error Exception when ISR context is not restored */ 97/* False Hardware Error Exception When ISR Context Is Not Restored */
100#define ANOMALY_05000281 (__SILICON_REVISION__ < 3) 98#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
101/* Memory DMA corruption with 32-bit data and traffic control */ 99/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
102#define ANOMALY_05000282 (__SILICON_REVISION__ < 3) 100#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
103/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */ 101/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
104#define ANOMALY_05000283 (__SILICON_REVISION__ < 3) 102#define ANOMALY_05000283 (__SILICON_REVISION__ < 3)
105/* New Feature: EMAC TX DMA Word Alignment (Not Available On Older Silicon) */ 103/* New Feature: EMAC TX DMA Word Alignment (Not Available On Older Silicon) */
106#define ANOMALY_05000285 (__SILICON_REVISION__ < 3) 104#define ANOMALY_05000285 (__SILICON_REVISION__ < 3)
107/* SPORTs may receive bad data if FIFOs fill up */ 105/* SPORTs May Receive Bad Data If FIFOs Fill Up */
108#define ANOMALY_05000288 (__SILICON_REVISION__ < 3) 106#define ANOMALY_05000288 (__SILICON_REVISION__ < 3)
109/* Memory to memory DMA source/destination descriptors must be in same memory space */ 107/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
110#define ANOMALY_05000301 (1) 108#define ANOMALY_05000301 (1)
111/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */ 109/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
112#define ANOMALY_05000304 (__SILICON_REVISION__ < 3) 110#define ANOMALY_05000304 (__SILICON_REVISION__ < 3)
@@ -116,11 +114,11 @@
116#define ANOMALY_05000307 (__SILICON_REVISION__ < 3) 114#define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
117/* Writing UART_THR while UART clock is disabled sends erroneous start bit */ 115/* Writing UART_THR while UART clock is disabled sends erroneous start bit */
118#define ANOMALY_05000309 (__SILICON_REVISION__ < 3) 116#define ANOMALY_05000309 (__SILICON_REVISION__ < 3)
119/* False hardware errors caused by fetches at the boundary of reserved memory */ 117/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
120#define ANOMALY_05000310 (1) 118#define ANOMALY_05000310 (1)
121/* Errors when SSYNC, CSYNC, or loads to LT, LB and LC registers are interrupted */ 119/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
122#define ANOMALY_05000312 (1) 120#define ANOMALY_05000312 (1)
123/* PPI is level sensitive on first transfer */ 121/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
124#define ANOMALY_05000313 (1) 122#define ANOMALY_05000313 (1)
125/* Killed System MMR Write Completes Erroneously On Next System MMR Access */ 123/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
126#define ANOMALY_05000315 (__SILICON_REVISION__ < 3) 124#define ANOMALY_05000315 (__SILICON_REVISION__ < 3)
@@ -156,24 +154,46 @@
156#define ANOMALY_05000426 (1) 154#define ANOMALY_05000426 (1)
157/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 155/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
158#define ANOMALY_05000443 (1) 156#define ANOMALY_05000443 (1)
157/* False Hardware Error when RETI points to invalid memory */
158#define ANOMALY_05000461 (1)
159 159
160/* Anomalies that don't exist on this proc */ 160/* Anomalies that don't exist on this proc */
161#define ANOMALY_05000099 (0)
162#define ANOMALY_05000120 (0)
161#define ANOMALY_05000125 (0) 163#define ANOMALY_05000125 (0)
164#define ANOMALY_05000149 (0)
162#define ANOMALY_05000158 (0) 165#define ANOMALY_05000158 (0)
166#define ANOMALY_05000171 (0)
167#define ANOMALY_05000179 (0)
163#define ANOMALY_05000183 (0) 168#define ANOMALY_05000183 (0)
164#define ANOMALY_05000198 (0) 169#define ANOMALY_05000198 (0)
170#define ANOMALY_05000215 (0)
171#define ANOMALY_05000220 (0)
172#define ANOMALY_05000227 (0)
165#define ANOMALY_05000230 (0) 173#define ANOMALY_05000230 (0)
174#define ANOMALY_05000231 (0)
175#define ANOMALY_05000233 (0)
176#define ANOMALY_05000242 (0)
177#define ANOMALY_05000248 (0)
166#define ANOMALY_05000266 (0) 178#define ANOMALY_05000266 (0)
179#define ANOMALY_05000274 (0)
180#define ANOMALY_05000287 (0)
167#define ANOMALY_05000311 (0) 181#define ANOMALY_05000311 (0)
168#define ANOMALY_05000323 (0) 182#define ANOMALY_05000323 (0)
169#define ANOMALY_05000353 (1) 183#define ANOMALY_05000353 (1)
184#define ANOMALY_05000362 (1)
170#define ANOMALY_05000363 (0) 185#define ANOMALY_05000363 (0)
171#define ANOMALY_05000380 (0) 186#define ANOMALY_05000380 (0)
172#define ANOMALY_05000386 (1) 187#define ANOMALY_05000386 (1)
188#define ANOMALY_05000389 (0)
189#define ANOMALY_05000400 (0)
173#define ANOMALY_05000412 (0) 190#define ANOMALY_05000412 (0)
191#define ANOMALY_05000430 (0)
174#define ANOMALY_05000432 (0) 192#define ANOMALY_05000432 (0)
175#define ANOMALY_05000435 (0) 193#define ANOMALY_05000435 (0)
176#define ANOMALY_05000447 (0) 194#define ANOMALY_05000447 (0)
177#define ANOMALY_05000448 (0) 195#define ANOMALY_05000448 (0)
196#define ANOMALY_05000456 (0)
197#define ANOMALY_05000450 (0)
178 198
179#endif 199#endif
diff --git a/arch/blackfin/mach-bf538/Kconfig b/arch/blackfin/mach-bf538/Kconfig
index f068c3523cdc..2d280f504ab0 100644
--- a/arch/blackfin/mach-bf538/Kconfig
+++ b/arch/blackfin/mach-bf538/Kconfig
@@ -57,6 +57,7 @@ config IRQ_UART0_TX
57 default 10 57 default 10
58config IRQ_TIMER0 58config IRQ_TIMER0
59 int "IRQ_TIMER0" 59 int "IRQ_TIMER0"
60 default 7 if TICKSOURCE_GPTMR0
60 default 8 61 default 8
61config IRQ_TIMER1 62config IRQ_TIMER1
62 int "IRQ_TIMER1" 63 int "IRQ_TIMER1"
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 3a5699827363..175ca9ef7232 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -6,7 +6,7 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision G, 09/18/2008; ADSP-BF538/BF538F Blackfin Processor Anomaly List 10 * - Revision G, 09/18/2008; ADSP-BF538/BF538F Blackfin Processor Anomaly List
11 * - Revision L, 09/18/2008; ADSP-BF539/BF539F Blackfin Processor Anomaly List 11 * - Revision L, 09/18/2008; ADSP-BF539/BF539F Blackfin Processor Anomaly List
12 */ 12 */
@@ -14,17 +14,29 @@
14#ifndef _MACH_ANOMALY_H_ 14#ifndef _MACH_ANOMALY_H_
15#define _MACH_ANOMALY_H_ 15#define _MACH_ANOMALY_H_
16 16
17/* We do not support old silicon - sorry */
17#if __SILICON_REVISION__ < 4 18#if __SILICON_REVISION__ < 4
18# error will not work on BF538 silicon version 0.0, 0.1, 0.2, or 0.3 19# error will not work on BF538/BF539 silicon version 0.0, 0.1, 0.2, or 0.3
19#endif 20#endif
20 21
21/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */ 22#if defined(__ADSPBF538__)
23# define ANOMALY_BF538 1
24#else
25# define ANOMALY_BF538 0
26#endif
27#if defined(__ADSPBF539__)
28# define ANOMALY_BF539 1
29#else
30# define ANOMALY_BF539 0
31#endif
32
33/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
22#define ANOMALY_05000074 (1) 34#define ANOMALY_05000074 (1)
23/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */ 35/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
24#define ANOMALY_05000119 (1) 36#define ANOMALY_05000119 (1)
25/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 37/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
26#define ANOMALY_05000122 (1) 38#define ANOMALY_05000122 (1)
27/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */ 39/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
28#define ANOMALY_05000166 (1) 40#define ANOMALY_05000166 (1)
29/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */ 41/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
30#define ANOMALY_05000179 (1) 42#define ANOMALY_05000179 (1)
@@ -40,13 +52,13 @@
40#define ANOMALY_05000229 (1) 52#define ANOMALY_05000229 (1)
41/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */ 53/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */
42#define ANOMALY_05000233 (1) 54#define ANOMALY_05000233 (1)
43/* If i-cache is on, CSYNC/SSYNC/IDLE around Change of Control causes failures */ 55/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
44#define ANOMALY_05000244 (__SILICON_REVISION__ < 3) 56#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
45/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */ 57/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
46#define ANOMALY_05000245 (1) 58#define ANOMALY_05000245 (1)
47/* Maximum External Clock Speed for Timers */ 59/* Maximum External Clock Speed for Timers */
48#define ANOMALY_05000253 (1) 60#define ANOMALY_05000253 (1)
49/* DCPLB_FAULT_ADDR MMR register may be corrupted */ 61/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
50#define ANOMALY_05000261 (__SILICON_REVISION__ < 3) 62#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
51/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */ 63/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
52#define ANOMALY_05000270 (__SILICON_REVISION__ < 4) 64#define ANOMALY_05000270 (__SILICON_REVISION__ < 4)
@@ -58,11 +70,11 @@
58#define ANOMALY_05000277 (__SILICON_REVISION__ < 4) 70#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
59/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 71/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
60#define ANOMALY_05000278 (__SILICON_REVISION__ < 4) 72#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
61/* False Hardware Error Exception when ISR Context Is Not Restored */ 73/* False Hardware Error Exception When ISR Context Is Not Restored */
62#define ANOMALY_05000281 (__SILICON_REVISION__ < 4) 74#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
63/* Memory DMA Corruption with 32-Bit Data and Traffic Control */ 75/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
64#define ANOMALY_05000282 (__SILICON_REVISION__ < 4) 76#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
65/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */ 77/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
66#define ANOMALY_05000283 (__SILICON_REVISION__ < 4) 78#define ANOMALY_05000283 (__SILICON_REVISION__ < 4)
67/* SPORTs May Receive Bad Data If FIFOs Fill Up */ 79/* SPORTs May Receive Bad Data If FIFOs Fill Up */
68#define ANOMALY_05000288 (__SILICON_REVISION__ < 4) 80#define ANOMALY_05000288 (__SILICON_REVISION__ < 4)
@@ -80,14 +92,14 @@
80#define ANOMALY_05000307 (__SILICON_REVISION__ < 4) 92#define ANOMALY_05000307 (__SILICON_REVISION__ < 4)
81/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ 93/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
82#define ANOMALY_05000310 (1) 94#define ANOMALY_05000310 (1)
83/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ 95/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
84#define ANOMALY_05000312 (__SILICON_REVISION__ < 5) 96#define ANOMALY_05000312 (__SILICON_REVISION__ < 5)
85/* PPI Is Level-Sensitive on First Transfer */ 97/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
86#define ANOMALY_05000313 (__SILICON_REVISION__ < 4) 98#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
87/* Killed System MMR Write Completes Erroneously on Next System MMR Access */ 99/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
88#define ANOMALY_05000315 (__SILICON_REVISION__ < 4) 100#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
89/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */ 101/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
90#define ANOMALY_05000318 (__SILICON_REVISION__ < 4) 102#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
91/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */ 103/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
92#define ANOMALY_05000355 (__SILICON_REVISION__ < 5) 104#define ANOMALY_05000355 (__SILICON_REVISION__ < 5)
93/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ 105/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
@@ -114,23 +126,45 @@
114#define ANOMALY_05000436 (__SILICON_REVISION__ > 3) 126#define ANOMALY_05000436 (__SILICON_REVISION__ > 3)
115/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 127/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
116#define ANOMALY_05000443 (1) 128#define ANOMALY_05000443 (1)
129/* False Hardware Error when RETI points to invalid memory */
130#define ANOMALY_05000461 (1)
117 131
118/* Anomalies that don't exist on this proc */ 132/* Anomalies that don't exist on this proc */
133#define ANOMALY_05000099 (0)
134#define ANOMALY_05000120 (0)
135#define ANOMALY_05000149 (0)
119#define ANOMALY_05000158 (0) 136#define ANOMALY_05000158 (0)
137#define ANOMALY_05000171 (0)
120#define ANOMALY_05000198 (0) 138#define ANOMALY_05000198 (0)
139#define ANOMALY_05000215 (0)
140#define ANOMALY_05000220 (0)
141#define ANOMALY_05000227 (0)
121#define ANOMALY_05000230 (0) 142#define ANOMALY_05000230 (0)
143#define ANOMALY_05000231 (0)
144#define ANOMALY_05000242 (0)
145#define ANOMALY_05000248 (0)
146#define ANOMALY_05000250 (0)
147#define ANOMALY_05000254 (0)
122#define ANOMALY_05000263 (0) 148#define ANOMALY_05000263 (0)
149#define ANOMALY_05000274 (0)
150#define ANOMALY_05000287 (0)
123#define ANOMALY_05000305 (0) 151#define ANOMALY_05000305 (0)
124#define ANOMALY_05000311 (0) 152#define ANOMALY_05000311 (0)
125#define ANOMALY_05000323 (0) 153#define ANOMALY_05000323 (0)
126#define ANOMALY_05000353 (1) 154#define ANOMALY_05000353 (1)
155#define ANOMALY_05000362 (1)
127#define ANOMALY_05000363 (0) 156#define ANOMALY_05000363 (0)
128#define ANOMALY_05000380 (0) 157#define ANOMALY_05000380 (0)
129#define ANOMALY_05000386 (1) 158#define ANOMALY_05000386 (1)
159#define ANOMALY_05000389 (0)
160#define ANOMALY_05000400 (0)
130#define ANOMALY_05000412 (0) 161#define ANOMALY_05000412 (0)
162#define ANOMALY_05000430 (0)
131#define ANOMALY_05000432 (0) 163#define ANOMALY_05000432 (0)
132#define ANOMALY_05000435 (0) 164#define ANOMALY_05000435 (0)
133#define ANOMALY_05000447 (0) 165#define ANOMALY_05000447 (0)
134#define ANOMALY_05000448 (0) 166#define ANOMALY_05000448 (0)
167#define ANOMALY_05000456 (0)
168#define ANOMALY_05000450 (0)
135 169
136#endif 170#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index ea25371a922b..6f628353dde3 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -68,25 +68,6 @@
68#define OFFSET_SCR 0x1C /* SCR Scratch Register */ 68#define OFFSET_SCR 0x1C /* SCR Scratch Register */
69#define OFFSET_GCTL 0x24 /* Global Control Register */ 69#define OFFSET_GCTL 0x24 /* Global Control Register */
70 70
71
72#define bfin_write_MDMA_D0_IRQ_STATUS bfin_write_MDMA0_D0_IRQ_STATUS
73#define bfin_write_MDMA_D0_START_ADDR bfin_write_MDMA0_D0_START_ADDR
74#define bfin_write_MDMA_S0_START_ADDR bfin_write_MDMA0_S0_START_ADDR
75#define bfin_write_MDMA_D0_X_COUNT bfin_write_MDMA0_D0_X_COUNT
76#define bfin_write_MDMA_S0_X_COUNT bfin_write_MDMA0_S0_X_COUNT
77#define bfin_write_MDMA_D0_Y_COUNT bfin_write_MDMA0_D0_Y_COUNT
78#define bfin_write_MDMA_S0_Y_COUNT bfin_write_MDMA0_S0_Y_COUNT
79#define bfin_write_MDMA_D0_X_MODIFY bfin_write_MDMA0_D0_X_MODIFY
80#define bfin_write_MDMA_S0_X_MODIFY bfin_write_MDMA0_S0_X_MODIFY
81#define bfin_write_MDMA_D0_Y_MODIFY bfin_write_MDMA0_D0_Y_MODIFY
82#define bfin_write_MDMA_S0_Y_MODIFY bfin_write_MDMA0_S0_Y_MODIFY
83#define bfin_write_MDMA_S0_CONFIG bfin_write_MDMA0_S0_CONFIG
84#define bfin_write_MDMA_D0_CONFIG bfin_write_MDMA0_D0_CONFIG
85#define bfin_read_MDMA_S0_CONFIG bfin_read_MDMA0_S0_CONFIG
86#define bfin_read_MDMA_D0_IRQ_STATUS bfin_read_MDMA0_D0_IRQ_STATUS
87#define bfin_write_MDMA_S0_IRQ_STATUS bfin_write_MDMA0_S0_IRQ_STATUS
88
89
90/* DPMC*/ 71/* DPMC*/
91#define bfin_read_STOPCK_OFF() bfin_read_STOPCK() 72#define bfin_read_STOPCK_OFF() bfin_read_STOPCK()
92#define bfin_write_STOPCK_OFF(val) bfin_write_STOPCK(val) 73#define bfin_write_STOPCK_OFF(val) bfin_write_STOPCK(val)
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
index 241725bc6988..99ca3f4305e2 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -67,14 +67,14 @@
67#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val) 67#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
68#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1) 68#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
69#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val) 69#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
70#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0)) 70#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0))
71#define bfin_write_SIC_ISR(x, val) bfin_write32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0), val) 71#define bfin_write_SIC_ISR(x, val) bfin_write32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0), val)
72#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0) 72#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
73#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val) 73#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
74#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1) 74#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
75#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val) 75#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
76#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0)) 76#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0))
77#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0), val) 77#define bfin_write_SIC_IWR(x, val) bfin_write32(SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0), val)
78#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0) 78#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
79#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val) 79#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
80#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1) 80#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
@@ -1247,6 +1247,65 @@
1247#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val) 1247#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val)
1248#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT) 1248#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
1249#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val) 1249#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val)
1250
1251#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA0_S0_CONFIG()
1252#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA0_S0_CONFIG(val)
1253#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA0_S0_IRQ_STATUS()
1254#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA0_S0_IRQ_STATUS(val)
1255#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA0_S0_X_MODIFY()
1256#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA0_S0_X_MODIFY(val)
1257#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA0_S0_Y_MODIFY()
1258#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA0_S0_Y_MODIFY(val)
1259#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA0_S0_X_COUNT()
1260#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA0_S0_X_COUNT(val)
1261#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA0_S0_Y_COUNT()
1262#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA0_S0_Y_COUNT(val)
1263#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA0_S0_START_ADDR()
1264#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA0_S0_START_ADDR(val)
1265#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA0_D0_CONFIG()
1266#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA0_D0_CONFIG(val)
1267#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA0_D0_IRQ_STATUS()
1268#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA0_D0_IRQ_STATUS(val)
1269#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA0_D0_X_MODIFY()
1270#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA0_D0_X_MODIFY(val)
1271#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA0_D0_Y_MODIFY()
1272#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA0_D0_Y_MODIFY(val)
1273#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA0_D0_X_COUNT()
1274#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA0_D0_X_COUNT(val)
1275#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA0_D0_Y_COUNT()
1276#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA0_D0_Y_COUNT(val)
1277#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA0_D0_START_ADDR()
1278#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA0_D0_START_ADDR(val)
1279
1280#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA0_S1_CONFIG()
1281#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA0_S1_CONFIG(val)
1282#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA0_S1_IRQ_STATUS()
1283#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA0_S1_IRQ_STATUS(val)
1284#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA0_S1_X_MODIFY()
1285#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA0_S1_X_MODIFY(val)
1286#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA0_S1_Y_MODIFY()
1287#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA0_S1_Y_MODIFY(val)
1288#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA0_S1_X_COUNT()
1289#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA0_S1_X_COUNT(val)
1290#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA0_S1_Y_COUNT()
1291#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA0_S1_Y_COUNT(val)
1292#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA0_S1_START_ADDR()
1293#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA0_S1_START_ADDR(val)
1294#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA0_D1_CONFIG()
1295#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA0_D1_CONFIG(val)
1296#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA0_D1_IRQ_STATUS()
1297#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA0_D1_IRQ_STATUS(val)
1298#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA0_D1_X_MODIFY()
1299#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA0_D1_X_MODIFY(val)
1300#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA0_D1_Y_MODIFY()
1301#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA0_D1_Y_MODIFY(val)
1302#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA0_D1_X_COUNT()
1303#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA0_D1_X_COUNT(val)
1304#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA0_D1_Y_COUNT()
1305#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA0_D1_Y_COUNT(val)
1306#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA0_D1_START_ADDR()
1307#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA0_D1_START_ADDR(val)
1308
1250#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL) 1309#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
1251#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val) 1310#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
1252#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS) 1311#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 6adbfcc65a35..bdc330cd0e1c 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -412,6 +412,62 @@
412#define MDMA0_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */ 412#define MDMA0_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */
413#define MDMA0_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */ 413#define MDMA0_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */
414 414
415#define MDMA_D0_NEXT_DESC_PTR MDMA0_D0_NEXT_DESC_PTR
416#define MDMA_D0_START_ADDR MDMA0_D0_START_ADDR
417#define MDMA_D0_CONFIG MDMA0_D0_CONFIG
418#define MDMA_D0_X_COUNT MDMA0_D0_X_COUNT
419#define MDMA_D0_X_MODIFY MDMA0_D0_X_MODIFY
420#define MDMA_D0_Y_COUNT MDMA0_D0_Y_COUNT
421#define MDMA_D0_Y_MODIFY MDMA0_D0_Y_MODIFY
422#define MDMA_D0_CURR_DESC_PTR MDMA0_D0_CURR_DESC_PTR
423#define MDMA_D0_CURR_ADDR MDMA0_D0_CURR_ADDR
424#define MDMA_D0_IRQ_STATUS MDMA0_D0_IRQ_STATUS
425#define MDMA_D0_PERIPHERAL_MAP MDMA0_D0_PERIPHERAL_MAP
426#define MDMA_D0_CURR_X_COUNT MDMA0_D0_CURR_X_COUNT
427#define MDMA_D0_CURR_Y_COUNT MDMA0_D0_CURR_Y_COUNT
428
429#define MDMA_S0_NEXT_DESC_PTR MDMA0_S0_NEXT_DESC_PTR
430#define MDMA_S0_START_ADDR MDMA0_S0_START_ADDR
431#define MDMA_S0_CONFIG MDMA0_S0_CONFIG
432#define MDMA_S0_X_COUNT MDMA0_S0_X_COUNT
433#define MDMA_S0_X_MODIFY MDMA0_S0_X_MODIFY
434#define MDMA_S0_Y_COUNT MDMA0_S0_Y_COUNT
435#define MDMA_S0_Y_MODIFY MDMA0_S0_Y_MODIFY
436#define MDMA_S0_CURR_DESC_PTR MDMA0_S0_CURR_DESC_PTR
437#define MDMA_S0_CURR_ADDR MDMA0_S0_CURR_ADDR
438#define MDMA_S0_IRQ_STATUS MDMA0_S0_IRQ_STATUS
439#define MDMA_S0_PERIPHERAL_MAP MDMA0_S0_PERIPHERAL_MAP
440#define MDMA_S0_CURR_X_COUNT MDMA0_S0_CURR_X_COUNT
441#define MDMA_S0_CURR_Y_COUNT MDMA0_S0_CURR_Y_COUNT
442
443#define MDMA_D1_NEXT_DESC_PTR MDMA0_D1_NEXT_DESC_PTR
444#define MDMA_D1_START_ADDR MDMA0_D1_START_ADDR
445#define MDMA_D1_CONFIG MDMA0_D1_CONFIG
446#define MDMA_D1_X_COUNT MDMA0_D1_X_COUNT
447#define MDMA_D1_X_MODIFY MDMA0_D1_X_MODIFY
448#define MDMA_D1_Y_COUNT MDMA0_D1_Y_COUNT
449#define MDMA_D1_Y_MODIFY MDMA0_D1_Y_MODIFY
450#define MDMA_D1_CURR_DESC_PTR MDMA0_D1_CURR_DESC_PTR
451#define MDMA_D1_CURR_ADDR MDMA0_D1_CURR_ADDR
452#define MDMA_D1_IRQ_STATUS MDMA0_D1_IRQ_STATUS
453#define MDMA_D1_PERIPHERAL_MAP MDMA0_D1_PERIPHERAL_MAP
454#define MDMA_D1_CURR_X_COUNT MDMA0_D1_CURR_X_COUNT
455#define MDMA_D1_CURR_Y_COUNT MDMA0_D1_CURR_Y_COUNT
456
457#define MDMA_S1_NEXT_DESC_PTR MDMA0_S1_NEXT_DESC_PTR
458#define MDMA_S1_START_ADDR MDMA0_S1_START_ADDR
459#define MDMA_S1_CONFIG MDMA0_S1_CONFIG
460#define MDMA_S1_X_COUNT MDMA0_S1_X_COUNT
461#define MDMA_S1_X_MODIFY MDMA0_S1_X_MODIFY
462#define MDMA_S1_Y_COUNT MDMA0_S1_Y_COUNT
463#define MDMA_S1_Y_MODIFY MDMA0_S1_Y_MODIFY
464#define MDMA_S1_CURR_DESC_PTR MDMA0_S1_CURR_DESC_PTR
465#define MDMA_S1_CURR_ADDR MDMA0_S1_CURR_ADDR
466#define MDMA_S1_IRQ_STATUS MDMA0_S1_IRQ_STATUS
467#define MDMA_S1_PERIPHERAL_MAP MDMA0_S1_PERIPHERAL_MAP
468#define MDMA_S1_CURR_X_COUNT MDMA0_S1_CURR_X_COUNT
469#define MDMA_S1_CURR_Y_COUNT MDMA0_S1_CURR_Y_COUNT
470
415 471
416/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */ 472/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
417#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */ 473#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
diff --git a/arch/blackfin/mach-bf548/Kconfig b/arch/blackfin/mach-bf548/Kconfig
index dcf657159051..a09623dfd550 100644
--- a/arch/blackfin/mach-bf548/Kconfig
+++ b/arch/blackfin/mach-bf548/Kconfig
@@ -11,6 +11,13 @@ config DEB_DMA_URGENT
11 help 11 help
12 Treat any DEB1, DEB2 and DEB3 request as Urgent 12 Treat any DEB1, DEB2 and DEB3 request as Urgent
13 13
14config BF548_ATAPI_ALTERNATIVE_PORT
15 bool "BF548 ATAPI alternative port via GPIO"
16 help
17 BF548 ATAPI data and address PINs can be routed through
18 async address or GPIO port F and G. Select y to route it
19 to GPIO.
20
14comment "Interrupt Priority Assignment" 21comment "Interrupt Priority Assignment"
15menu "Priority" 22menu "Priority"
16 23
@@ -250,6 +257,7 @@ config IRQ_OTPSEC
250 default 11 257 default 11
251config IRQ_TIMER0 258config IRQ_TIMER0
252 int "IRQ_TIMER0" 259 int "IRQ_TIMER0"
260 default 7 if TICKSOURCE_GPTMR0
253 default 8 261 default 8
254config IRQ_TIMER1 262config IRQ_TIMER1
255 int "IRQ_TIMER1" 263 int "IRQ_TIMER1"
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 096e661700a7..add5a17452ce 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -208,6 +208,43 @@ static struct platform_device bfin_rotary_device = {
208}; 208};
209#endif 209#endif
210 210
211#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
212#include <linux/input.h>
213#include <linux/spi/adxl34x.h>
214static const struct adxl34x_platform_data adxl34x_info = {
215 .x_axis_offset = 0,
216 .y_axis_offset = 0,
217 .z_axis_offset = 0,
218 .tap_threshold = 0x31,
219 .tap_duration = 0x10,
220 .tap_latency = 0x60,
221 .tap_window = 0xF0,
222 .tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
223 .act_axis_control = 0xFF,
224 .activity_threshold = 5,
225 .inactivity_threshold = 3,
226 .inactivity_time = 4,
227 .free_fall_threshold = 0x7,
228 .free_fall_time = 0x20,
229 .data_rate = 0x8,
230 .data_range = ADXL_FULL_RES,
231
232 .ev_type = EV_ABS,
233 .ev_code_x = ABS_X, /* EV_REL */
234 .ev_code_y = ABS_Y, /* EV_REL */
235 .ev_code_z = ABS_Z, /* EV_REL */
236
237 .ev_code_tap_x = BTN_TOUCH, /* EV_KEY */
238 .ev_code_tap_y = BTN_TOUCH, /* EV_KEY */
239 .ev_code_tap_z = BTN_TOUCH, /* EV_KEY */
240
241/* .ev_code_ff = KEY_F,*/ /* EV_KEY */
242/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
243 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
244 .fifo_mode = ADXL_FIFO_STREAM,
245};
246#endif
247
211#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 248#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
212static struct platform_device rtc_device = { 249static struct platform_device rtc_device = {
213 .name = "rtc-bfin", 250 .name = "rtc-bfin",
@@ -628,6 +665,14 @@ static struct bfin5xx_spi_chip spidev_chip_info = {
628}; 665};
629#endif 666#endif
630 667
668#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
669static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
670 .enable_dma = 0, /* use dma transfer with this chip*/
671 .bits_per_word = 8,
672 .cs_change_per_word = 0,
673};
674#endif
675
631static struct spi_board_info bfin_spi_board_info[] __initdata = { 676static struct spi_board_info bfin_spi_board_info[] __initdata = {
632#if defined(CONFIG_MTD_M25P80) \ 677#if defined(CONFIG_MTD_M25P80) \
633 || defined(CONFIG_MTD_M25P80_MODULE) 678 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -653,15 +698,15 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
653 }, 698 },
654#endif 699#endif
655#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 700#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
656{ 701 {
657 .modalias = "ad7877", 702 .modalias = "ad7877",
658 .platform_data = &bfin_ad7877_ts_info, 703 .platform_data = &bfin_ad7877_ts_info,
659 .irq = IRQ_PB4, /* old boards (<=Rev 1.3) use IRQ_PJ11 */ 704 .irq = IRQ_PB4, /* old boards (<=Rev 1.3) use IRQ_PJ11 */
660 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 705 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
661 .bus_num = 0, 706 .bus_num = 0,
662 .chip_select = 2, 707 .chip_select = 2,
663 .controller_data = &spi_ad7877_chip_info, 708 .controller_data = &spi_ad7877_chip_info,
664}, 709 },
665#endif 710#endif
666#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 711#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
667 { 712 {
@@ -672,8 +717,19 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
672 .controller_data = &spidev_chip_info, 717 .controller_data = &spidev_chip_info,
673 }, 718 },
674#endif 719#endif
720#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
721 {
722 .modalias = "adxl34x",
723 .platform_data = &adxl34x_info,
724 .irq = IRQ_PC5,
725 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
726 .bus_num = 1,
727 .chip_select = 2,
728 .controller_data = &spi_adxl34x_chip_info,
729 .mode = SPI_MODE_3,
730 },
731#endif
675}; 732};
676
677#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 733#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
678/* SPI (0) */ 734/* SPI (0) */
679static struct resource bfin_spi0_resource[] = { 735static struct resource bfin_spi0_resource[] = {
@@ -786,7 +842,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
786 842
787#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */ 843#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */
788static struct i2c_board_info __initdata bfin_i2c_board_info1[] = { 844static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
789#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE) 845#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
790 { 846 {
791 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 847 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
792 }, 848 },
@@ -797,6 +853,13 @@ static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
797 .irq = 212, 853 .irq = 212,
798 }, 854 },
799#endif 855#endif
856#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
857 {
858 I2C_BOARD_INFO("adxl34x", 0x53),
859 .irq = IRQ_PC5,
860 .platform_data = (void *)&adxl34x_info,
861 },
862#endif
800}; 863};
801#endif 864#endif
802 865
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 882e40ccf0d1..c510ae688e28 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -6,26 +6,31 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision H, 01/16/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List 10 * - Revision H, 01/16/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
11 */ 11 */
12 12
13#ifndef _MACH_ANOMALY_H_ 13#ifndef _MACH_ANOMALY_H_
14#define _MACH_ANOMALY_H_ 14#define _MACH_ANOMALY_H_
15 15
16/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */ 16/* We do not support 0.0 or 0.1 silicon - sorry */
17#if __SILICON_REVISION__ < 2
18# error will not work on BF548 silicon version 0.0, or 0.1
19#endif
20
21/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
17#define ANOMALY_05000074 (1) 22#define ANOMALY_05000074 (1)
18/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */ 23/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
19#define ANOMALY_05000119 (1) 24#define ANOMALY_05000119 (1)
20/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 25/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
21#define ANOMALY_05000122 (1) 26#define ANOMALY_05000122 (1)
22/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */ 27/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
23#define ANOMALY_05000245 (1) 28#define ANOMALY_05000245 (1)
24/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ 29/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
25#define ANOMALY_05000265 (1) 30#define ANOMALY_05000265 (1)
26/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */ 31/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
27#define ANOMALY_05000272 (1) 32#define ANOMALY_05000272 (1)
28/* False Hardware Error Exception when ISR context is not restored */ 33/* False Hardware Error Exception When ISR Context Is Not Restored */
29#define ANOMALY_05000281 (__SILICON_REVISION__ < 1) 34#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
30/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */ 35/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
31#define ANOMALY_05000304 (__SILICON_REVISION__ < 1) 36#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
@@ -59,7 +64,7 @@
59#define ANOMALY_05000340 (__SILICON_REVISION__ < 1) 64#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
60/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */ 65/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
61#define ANOMALY_05000344 (__SILICON_REVISION__ < 1) 66#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
62/* USB Calibration Value Is Not Intialized */ 67/* USB Calibration Value Is Not Initialized */
63#define ANOMALY_05000346 (__SILICON_REVISION__ < 1) 68#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
64/* USB Calibration Value to use */ 69/* USB Calibration Value to use */
65#define ANOMALY_05000346_value 0x5411 70#define ANOMALY_05000346_value 0x5411
@@ -147,11 +152,11 @@
147#define ANOMALY_05000416 (1) 152#define ANOMALY_05000416 (1)
148/* Multichannel SPORT Channel Misalignment Under Specific Configuration */ 153/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
149#define ANOMALY_05000425 (1) 154#define ANOMALY_05000425 (1)
150/* Speculative Fetches of Indirect-Pointer Instructions Can Cause Spurious Hardware Errors */ 155/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
151#define ANOMALY_05000426 (1) 156#define ANOMALY_05000426 (1)
152/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */ 157/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */
153#define ANOMALY_05000427 (__SILICON_REVISION__ < 2) 158#define ANOMALY_05000427 (__SILICON_REVISION__ < 2)
154/* WB_EDGE Bit in NFC_IRQSTAT Incorrectly Behaves as a Buffer Status Bit Instead of an IRQ Status Bit */ 159/* WB_EDGE Bit in NFC_IRQSTAT Incorrectly Reflects Buffer Status Instead of IRQ Status */
155#define ANOMALY_05000429 (__SILICON_REVISION__ < 2) 160#define ANOMALY_05000429 (__SILICON_REVISION__ < 2)
156/* Software System Reset Corrupts PLL_LOCKCNT Register */ 161/* Software System Reset Corrupts PLL_LOCKCNT Register */
157#define ANOMALY_05000430 (__SILICON_REVISION__ >= 2) 162#define ANOMALY_05000430 (__SILICON_REVISION__ >= 2)
@@ -170,26 +175,49 @@
170/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */ 175/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
171#define ANOMALY_05000449 (__SILICON_REVISION__ == 1) 176#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
172/* USB DMA Mode 1 Short Packet Data Corruption */ 177/* USB DMA Mode 1 Short Packet Data Corruption */
173#define ANOMALY_05000450 (1 178#define ANOMALY_05000450 (1)
179/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
180#define ANOMALY_05000456 (__SILICON_REVISION__ < 3)
181/* False Hardware Error when RETI points to invalid memory */
182#define ANOMALY_05000461 (1)
174 183
175/* Anomalies that don't exist on this proc */ 184/* Anomalies that don't exist on this proc */
185#define ANOMALY_05000099 (0)
186#define ANOMALY_05000120 (0)
176#define ANOMALY_05000125 (0) 187#define ANOMALY_05000125 (0)
188#define ANOMALY_05000149 (0)
177#define ANOMALY_05000158 (0) 189#define ANOMALY_05000158 (0)
190#define ANOMALY_05000171 (0)
191#define ANOMALY_05000179 (0)
178#define ANOMALY_05000183 (0) 192#define ANOMALY_05000183 (0)
179#define ANOMALY_05000198 (0) 193#define ANOMALY_05000198 (0)
194#define ANOMALY_05000215 (0)
195#define ANOMALY_05000220 (0)
196#define ANOMALY_05000227 (0)
180#define ANOMALY_05000230 (0) 197#define ANOMALY_05000230 (0)
198#define ANOMALY_05000231 (0)
199#define ANOMALY_05000233 (0)
200#define ANOMALY_05000242 (0)
181#define ANOMALY_05000244 (0) 201#define ANOMALY_05000244 (0)
202#define ANOMALY_05000248 (0)
203#define ANOMALY_05000250 (0)
204#define ANOMALY_05000254 (0)
182#define ANOMALY_05000261 (0) 205#define ANOMALY_05000261 (0)
183#define ANOMALY_05000263 (0) 206#define ANOMALY_05000263 (0)
184#define ANOMALY_05000266 (0) 207#define ANOMALY_05000266 (0)
185#define ANOMALY_05000273 (0) 208#define ANOMALY_05000273 (0)
209#define ANOMALY_05000274 (0)
186#define ANOMALY_05000278 (0) 210#define ANOMALY_05000278 (0)
211#define ANOMALY_05000287 (0)
212#define ANOMALY_05000301 (0)
187#define ANOMALY_05000305 (0) 213#define ANOMALY_05000305 (0)
188#define ANOMALY_05000307 (0) 214#define ANOMALY_05000307 (0)
189#define ANOMALY_05000311 (0) 215#define ANOMALY_05000311 (0)
190#define ANOMALY_05000323 (0) 216#define ANOMALY_05000323 (0)
217#define ANOMALY_05000362 (1)
191#define ANOMALY_05000363 (0) 218#define ANOMALY_05000363 (0)
192#define ANOMALY_05000380 (0) 219#define ANOMALY_05000380 (0)
220#define ANOMALY_05000400 (0)
193#define ANOMALY_05000412 (0) 221#define ANOMALY_05000412 (0)
194#define ANOMALY_05000432 (0) 222#define ANOMALY_05000432 (0)
195#define ANOMALY_05000435 (0) 223#define ANOMALY_05000435 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/portmux.h b/arch/blackfin/mach-bf548/include/mach/portmux.h
index ffb1d0a44b4d..ce372ba0f046 100644
--- a/arch/blackfin/mach-bf548/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf548/include/mach/portmux.h
@@ -167,22 +167,42 @@
167#define P_PPI0_D13 (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(0)) 167#define P_PPI0_D13 (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(0))
168#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(0)) 168#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(0))
169#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(0)) 169#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(0))
170#define P_ATAPI_D0A (P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(1)) 170
171#define P_ATAPI_D1A (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(1)) 171#ifdef CONFIG_BF548_ATAPI_ALTERNATIVE_PORT
172#define P_ATAPI_D2A (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(1)) 172# define P_ATAPI_D0A (P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(1))
173#define P_ATAPI_D3A (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(1)) 173# define P_ATAPI_D1A (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(1))
174#define P_ATAPI_D4A (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(1)) 174# define P_ATAPI_D2A (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(1))
175#define P_ATAPI_D5A (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(1)) 175# define P_ATAPI_D3A (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(1))
176#define P_ATAPI_D6A (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(1)) 176# define P_ATAPI_D4A (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(1))
177#define P_ATAPI_D7A (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(1)) 177# define P_ATAPI_D5A (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(1))
178#define P_ATAPI_D8A (P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(1)) 178# define P_ATAPI_D6A (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(1))
179#define P_ATAPI_D9A (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(1)) 179# define P_ATAPI_D7A (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(1))
180#define P_ATAPI_D10A (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(1)) 180# define P_ATAPI_D8A (P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(1))
181#define P_ATAPI_D11A (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(1)) 181# define P_ATAPI_D9A (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(1))
182#define P_ATAPI_D12A (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(1)) 182# define P_ATAPI_D10A (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(1))
183#define P_ATAPI_D13A (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(1)) 183# define P_ATAPI_D11A (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(1))
184#define P_ATAPI_D14A (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1)) 184# define P_ATAPI_D12A (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(1))
185#define P_ATAPI_D15A (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1)) 185# define P_ATAPI_D13A (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(1))
186# define P_ATAPI_D14A (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
187# define P_ATAPI_D15A (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
188#else
189# define P_ATAPI_D0A (P_DONTCARE)
190# define P_ATAPI_D1A (P_DONTCARE)
191# define P_ATAPI_D2A (P_DONTCARE)
192# define P_ATAPI_D3A (P_DONTCARE)
193# define P_ATAPI_D4A (P_DONTCARE)
194# define P_ATAPI_D5A (P_DONTCARE)
195# define P_ATAPI_D6A (P_DONTCARE)
196# define P_ATAPI_D7A (P_DONTCARE)
197# define P_ATAPI_D8A (P_DONTCARE)
198# define P_ATAPI_D9A (P_DONTCARE)
199# define P_ATAPI_D10A (P_DONTCARE)
200# define P_ATAPI_D11A (P_DONTCARE)
201# define P_ATAPI_D12A (P_DONTCARE)
202# define P_ATAPI_D13A (P_DONTCARE)
203# define P_ATAPI_D14A (P_DONTCARE)
204# define P_ATAPI_D15A (P_DONTCARE)
205#endif
186 206
187#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(0)) 207#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(0))
188#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(0)) 208#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(0))
@@ -200,9 +220,15 @@
200#define P_CAN0_RX (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(0)) 220#define P_CAN0_RX (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(0))
201#define P_CAN1_TX (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(0)) 221#define P_CAN1_TX (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(0))
202#define P_CAN1_RX (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(0)) 222#define P_CAN1_RX (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(0))
203#define P_ATAPI_A0A (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(1)) 223#ifdef CONFIG_BF548_ATAPI_ALTERNATIVE_PORT
204#define P_ATAPI_A1A (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1)) 224# define P_ATAPI_A0A (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(1))
205#define P_ATAPI_A2A (P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(1)) 225# define P_ATAPI_A1A (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1))
226# define P_ATAPI_A2A (P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(1))
227#else
228# define P_ATAPI_A0A (P_DONTCARE)
229# define P_ATAPI_A1A (P_DONTCARE)
230# define P_ATAPI_A2A (P_DONTCARE)
231#endif
206#define P_HOST_CE (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1)) 232#define P_HOST_CE (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
207#define P_HOST_RD (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1)) 233#define P_HOST_RD (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
208#define P_HOST_WR (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1)) 234#define P_HOST_WR (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
diff --git a/arch/blackfin/mach-bf561/Kconfig b/arch/blackfin/mach-bf561/Kconfig
index 638ec38ca470..cb9743641511 100644
--- a/arch/blackfin/mach-bf561/Kconfig
+++ b/arch/blackfin/mach-bf561/Kconfig
@@ -9,22 +9,9 @@ if (!SMP)
9comment "Core B Support" 9comment "Core B Support"
10 10
11config BF561_COREB 11config BF561_COREB
12 bool "Enable Core B support" 12 bool "Enable Core B loader"
13 default y 13 default y
14 14
15config BF561_COREB_RESET
16 bool "Enable Core B reset support"
17 default n
18 help
19 This requires code in the application that is loaded
20 into Core B. In order to reset, the application needs
21 to install an interrupt handler for Supplemental
22 Interrupt 0, that sets RETI to 0xff600000 and writes
23 bit 11 of SICB_SYSCR when bit 5 of SICA_SYSCR is 0.
24 This causes Core B to stall when Supplemental Interrupt
25 0 is set, and will reset PC to 0xff600000 when
26 COREB_SRAM_INIT is cleared.
27
28endif 15endif
29 16
30comment "Interrupt Priority Assignment" 17comment "Interrupt Priority Assignment"
@@ -138,6 +125,7 @@ config IRQ_DMA2_11
138 default 9 125 default 9
139config IRQ_TIMER0 126config IRQ_TIMER0
140 int "TIMER 0 Interrupt" 127 int "TIMER 0 Interrupt"
128 default 7 if TICKSOURCE_GPTMR0
141 default 8 129 default 8
142config IRQ_TIMER1 130config IRQ_TIMER1
143 int "TIMER 1 Interrupt" 131 int "TIMER 1 Interrupt"
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index f623c6b0719f..0dd9685e5d53 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -83,7 +83,7 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
83}; 83};
84#endif 84#endif
85 85
86#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 86#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
87/* SPI ADC chip */ 87/* SPI ADC chip */
88static struct bfin5xx_spi_chip spi_adc_chip_info = { 88static struct bfin5xx_spi_chip spi_adc_chip_info = {
89 .enable_dma = 1, /* use dma transfer with this chip*/ 89 .enable_dma = 1, /* use dma transfer with this chip*/
@@ -126,7 +126,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
126 }, 126 },
127#endif 127#endif
128 128
129#if defined(CONFIG_SPI_ADC_BF533) || defined(CONFIG_SPI_ADC_BF533_MODULE) 129#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
130 { 130 {
131 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ 131 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
132 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ 132 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 8598098c0840..93635a766f9c 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -1,406 +1,74 @@
1/* 1/* Load firmware into Core B on a BF561
2 * File: arch/blackfin/mach-bf561/coreb.c
3 * Based on:
4 * Author:
5 * 2 *
6 * Created: 3 * Copyright 2004-2009 Analog Devices Inc.
7 * Description: Handle CoreB on a BF561 4 * Licensed under the GPL-2 or later.
8 * 5 */
9 * Modified: 6
10 * Copyright 2004-2006 Analog Devices Inc. 7/* The Core B reset func requires code in the application that is loaded into
11 * 8 * Core B. In order to reset, the application needs to install an interrupt
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 9 * handler for Supplemental Interrupt 0, that sets RETI to 0xff600000 and
13 * 10 * writes bit 11 of SICB_SYSCR when bit 5 of SICA_SYSCR is 0. This causes Core
14 * This program is free software; you can redistribute it and/or modify 11 * B to stall when Supplemental Interrupt 0 is set, and will reset PC to
15 * it under the terms of the GNU General Public License as published by 12 * 0xff600000 when COREB_SRAM_INIT is cleared.
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */ 13 */
29 14
30#include <linux/mm.h>
31#include <linux/miscdevice.h>
32#include <linux/device.h> 15#include <linux/device.h>
33#include <linux/ioport.h>
34#include <linux/module.h>
35#include <linux/uaccess.h>
36#include <linux/fs.h> 16#include <linux/fs.h>
37#include <asm/dma.h> 17#include <linux/kernel.h>
38#include <asm/cacheflush.h> 18#include <linux/miscdevice.h>
39 19#include <linux/module.h>
40#define MODULE_VER "v0.1"
41
42static spinlock_t coreb_lock;
43static wait_queue_head_t coreb_dma_wait;
44
45#define COREB_IS_OPEN 0x00000001
46#define COREB_IS_RUNNING 0x00000010
47 20
48#define CMD_COREB_INDEX 1
49#define CMD_COREB_START 2 21#define CMD_COREB_START 2
50#define CMD_COREB_STOP 3 22#define CMD_COREB_STOP 3
51#define CMD_COREB_RESET 4 23#define CMD_COREB_RESET 4
52 24
53#define COREB_MINOR 229 25static int
54 26coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55static unsigned long coreb_status = 0;
56static unsigned long coreb_base = 0xff600000;
57static unsigned long coreb_size = 0x4000;
58int coreb_dma_done;
59
60static loff_t coreb_lseek(struct file *file, loff_t offset, int origin);
61static ssize_t coreb_read(struct file *file, char *buf, size_t count,
62 loff_t * ppos);
63static ssize_t coreb_write(struct file *file, const char *buf, size_t count,
64 loff_t * ppos);
65static int coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
66 unsigned long arg);
67static int coreb_open(struct inode *inode, struct file *file);
68static int coreb_release(struct inode *inode, struct file *file);
69
70static irqreturn_t coreb_dma_interrupt(int irq, void *dev_id)
71{
72 clear_dma_irqstat(CH_MEM_STREAM2_DEST);
73 coreb_dma_done = 1;
74 wake_up_interruptible(&coreb_dma_wait);
75 return IRQ_HANDLED;
76}
77
78static ssize_t coreb_write(struct file *file, const char *buf, size_t count,
79 loff_t * ppos)
80{
81 unsigned long p = *ppos;
82 ssize_t wrote = 0;
83
84 if (p + count > coreb_size)
85 return -EFAULT;
86
87 while (count > 0) {
88 int len = count;
89
90 if (len > PAGE_SIZE)
91 len = PAGE_SIZE;
92
93 coreb_dma_done = 0;
94
95 flush_dcache_range((unsigned long)buf, (unsigned long)(buf+len));
96 /* Source Channel */
97 set_dma_start_addr(CH_MEM_STREAM2_SRC, (unsigned long)buf);
98 set_dma_x_count(CH_MEM_STREAM2_SRC, len);
99 set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char));
100 set_dma_config(CH_MEM_STREAM2_SRC, 0);
101 /* Destination Channel */
102 set_dma_start_addr(CH_MEM_STREAM2_DEST, coreb_base + p);
103 set_dma_x_count(CH_MEM_STREAM2_DEST, len);
104 set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char));
105 set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN);
106
107 enable_dma(CH_MEM_STREAM2_SRC);
108 enable_dma(CH_MEM_STREAM2_DEST);
109
110 wait_event_interruptible(coreb_dma_wait, coreb_dma_done);
111
112 disable_dma(CH_MEM_STREAM2_SRC);
113 disable_dma(CH_MEM_STREAM2_DEST);
114
115 count -= len;
116 wrote += len;
117 buf += len;
118 p += len;
119 }
120 *ppos = p;
121 return wrote;
122}
123
124static ssize_t coreb_read(struct file *file, char *buf, size_t count,
125 loff_t * ppos)
126{
127 unsigned long p = *ppos;
128 ssize_t read = 0;
129
130 if ((p + count) > coreb_size)
131 return -EFAULT;
132
133 while (count > 0) {
134 int len = count;
135
136 if (len > PAGE_SIZE)
137 len = PAGE_SIZE;
138
139 coreb_dma_done = 0;
140
141 invalidate_dcache_range((unsigned long)buf, (unsigned long)(buf+len));
142 /* Source Channel */
143 set_dma_start_addr(CH_MEM_STREAM2_SRC, coreb_base + p);
144 set_dma_x_count(CH_MEM_STREAM2_SRC, len);
145 set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char));
146 set_dma_config(CH_MEM_STREAM2_SRC, 0);
147 /* Destination Channel */
148 set_dma_start_addr(CH_MEM_STREAM2_DEST, (unsigned long)buf);
149 set_dma_x_count(CH_MEM_STREAM2_DEST, len);
150 set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char));
151 set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN);
152
153 enable_dma(CH_MEM_STREAM2_SRC);
154 enable_dma(CH_MEM_STREAM2_DEST);
155
156 wait_event_interruptible(coreb_dma_wait, coreb_dma_done);
157
158 disable_dma(CH_MEM_STREAM2_SRC);
159 disable_dma(CH_MEM_STREAM2_DEST);
160
161 count -= len;
162 read += len;
163 buf += len;
164 p += len;
165 }
166
167 return read;
168}
169
170static loff_t coreb_lseek(struct file *file, loff_t offset, int origin)
171{ 27{
172 loff_t ret; 28 int ret = 0;
173
174 mutex_lock(&file->f_dentry->d_inode->i_mutex);
175
176 switch (origin) {
177 case 0 /* SEEK_SET */ :
178 if (offset < coreb_size) {
179 file->f_pos = offset;
180 ret = file->f_pos;
181 } else
182 ret = -EINVAL;
183 break;
184 case 1 /* SEEK_CUR */ :
185 if ((offset + file->f_pos) < coreb_size) {
186 file->f_pos += offset;
187 ret = file->f_pos;
188 } else
189 ret = -EINVAL;
190 default:
191 ret = -EINVAL;
192 }
193 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
194 return ret;
195}
196
197/* No BKL needed here */
198static int coreb_open(struct inode *inode, struct file *file)
199{
200 spin_lock_irq(&coreb_lock);
201
202 if (coreb_status & COREB_IS_OPEN)
203 goto out_busy;
204
205 coreb_status |= COREB_IS_OPEN;
206
207 spin_unlock_irq(&coreb_lock);
208 return 0;
209
210 out_busy:
211 spin_unlock_irq(&coreb_lock);
212 return -EBUSY;
213}
214
215static int coreb_release(struct inode *inode, struct file *file)
216{
217 spin_lock_irq(&coreb_lock);
218 coreb_status &= ~COREB_IS_OPEN;
219 spin_unlock_irq(&coreb_lock);
220 return 0;
221}
222
223static int coreb_ioctl(struct inode *inode, struct file *file,
224 unsigned int cmd, unsigned long arg)
225{
226 int retval = 0;
227 int coreb_index = 0;
228 29
229 switch (cmd) { 30 switch (cmd) {
230 case CMD_COREB_INDEX:
231 if (copy_from_user(&coreb_index, (int *)arg, sizeof(int))) {
232 retval = -EFAULT;
233 break;
234 }
235
236 spin_lock_irq(&coreb_lock);
237 switch (coreb_index) {
238 case 0:
239 coreb_base = 0xff600000;
240 coreb_size = 0x4000;
241 break;
242 case 1:
243 coreb_base = 0xff610000;
244 coreb_size = 0x4000;
245 break;
246 case 2:
247 coreb_base = 0xff500000;
248 coreb_size = 0x8000;
249 break;
250 case 3:
251 coreb_base = 0xff400000;
252 coreb_size = 0x8000;
253 break;
254 default:
255 retval = -EINVAL;
256 break;
257 }
258 spin_unlock_irq(&coreb_lock);
259
260 mutex_lock(&file->f_dentry->d_inode->i_mutex);
261 file->f_pos = 0;
262 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
263 break;
264 case CMD_COREB_START: 31 case CMD_COREB_START:
265 spin_lock_irq(&coreb_lock);
266 if (coreb_status & COREB_IS_RUNNING) {
267 retval = -EBUSY;
268 break;
269 }
270 printk(KERN_INFO "Starting Core B\n");
271 coreb_status |= COREB_IS_RUNNING;
272 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~0x0020); 32 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~0x0020);
273 SSYNC();
274 spin_unlock_irq(&coreb_lock);
275 break; 33 break;
276#if defined(CONFIG_BF561_COREB_RESET)
277 case CMD_COREB_STOP: 34 case CMD_COREB_STOP:
278 spin_lock_irq(&coreb_lock);
279 printk(KERN_INFO "Stopping Core B\n");
280 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() | 0x0020); 35 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() | 0x0020);
281 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080); 36 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080);
282 coreb_status &= ~COREB_IS_RUNNING;
283 spin_unlock_irq(&coreb_lock);
284 break; 37 break;
285 case CMD_COREB_RESET: 38 case CMD_COREB_RESET:
286 printk(KERN_INFO "Resetting Core B\n");
287 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080); 39 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | 0x0080);
288 break; 40 break;
289#endif 41 default:
42 ret = -EINVAL;
43 break;
290 } 44 }
291 45
292 return retval; 46 CSYNC();
47
48 return ret;
293} 49}
294 50
295static struct file_operations coreb_fops = { 51static struct file_operations coreb_fops = {
296 .owner = THIS_MODULE, 52 .owner = THIS_MODULE,
297 .llseek = coreb_lseek, 53 .ioctl = coreb_ioctl,
298 .read = coreb_read,
299 .write = coreb_write,
300 .ioctl = coreb_ioctl,
301 .open = coreb_open,
302 .release = coreb_release
303}; 54};
304 55
305static struct miscdevice coreb_dev = { 56static struct miscdevice coreb_dev = {
306 COREB_MINOR, 57 .minor = MISC_DYNAMIC_MINOR,
307 "coreb", 58 .name = "coreb",
308 &coreb_fops 59 .fops = &coreb_fops,
309}; 60};
310 61
311static ssize_t coreb_show_status(struct device *dev, struct device_attribute *attr, char *buf) 62static int __init bf561_coreb_init(void)
312{ 63{
313 return sprintf(buf, 64 return misc_register(&coreb_dev);
314 "Base Address:\t0x%08lx\n"
315 "Core B is %s\n"
316 "SICA_SYSCR:\t%04x\n"
317 "SICB_SYSCR:\t%04x\n"
318 "\n"
319 "IRQ Status:\tCore A\t\tCore B\n"
320 "ISR0:\t\t%08x\t\t%08x\n"
321 "ISR1:\t\t%08x\t\t%08x\n"
322 "IMASK0:\t\t%08x\t\t%08x\n"
323 "IMASK1:\t\t%08x\t\t%08x\n",
324 coreb_base,
325 coreb_status & COREB_IS_RUNNING ? "running" : "stalled",
326 bfin_read_SICA_SYSCR(), bfin_read_SICB_SYSCR(),
327 bfin_read_SICA_ISR0(), bfin_read_SICB_ISR0(),
328 bfin_read_SICA_ISR1(), bfin_read_SICB_ISR0(),
329 bfin_read_SICA_IMASK0(), bfin_read_SICB_IMASK0(),
330 bfin_read_SICA_IMASK1(), bfin_read_SICB_IMASK1());
331}
332
333static DEVICE_ATTR(coreb_status, S_IRUGO, coreb_show_status, NULL);
334
335int __init bf561_coreb_init(void)
336{
337 init_waitqueue_head(&coreb_dma_wait);
338
339 spin_lock_init(&coreb_lock);
340 /* Request the core memory regions for Core B */
341 if (request_mem_region(0xff600000, 0x4000,
342 "Core B - Instruction SRAM") == NULL)
343 goto exit;
344
345 if (request_mem_region(0xFF610000, 0x4000,
346 "Core B - Instruction SRAM") == NULL)
347 goto release_instruction_a_sram;
348
349 if (request_mem_region(0xFF500000, 0x8000,
350 "Core B - Data Bank B SRAM") == NULL)
351 goto release_instruction_b_sram;
352
353 if (request_mem_region(0xff400000, 0x8000,
354 "Core B - Data Bank A SRAM") == NULL)
355 goto release_data_b_sram;
356
357 if (request_dma(CH_MEM_STREAM2_DEST, "Core B - DMA Destination") < 0)
358 goto release_data_a_sram;
359
360 if (request_dma(CH_MEM_STREAM2_SRC, "Core B - DMA Source") < 0)
361 goto release_dma_dest;
362
363 set_dma_callback(CH_MEM_STREAM2_DEST, coreb_dma_interrupt, NULL);
364
365 misc_register(&coreb_dev);
366
367 if (device_create_file(coreb_dev.this_device, &dev_attr_coreb_status))
368 goto release_dma_src;
369
370 printk(KERN_INFO "BF561 Core B driver %s initialized.\n", MODULE_VER);
371 return 0;
372
373 release_dma_src:
374 free_dma(CH_MEM_STREAM2_SRC);
375 release_dma_dest:
376 free_dma(CH_MEM_STREAM2_DEST);
377 release_data_a_sram:
378 release_mem_region(0xff400000, 0x8000);
379 release_data_b_sram:
380 release_mem_region(0xff500000, 0x8000);
381 release_instruction_b_sram:
382 release_mem_region(0xff610000, 0x4000);
383 release_instruction_a_sram:
384 release_mem_region(0xff600000, 0x4000);
385 exit:
386 return -ENOMEM;
387} 65}
66module_init(bf561_coreb_init);
388 67
389void __exit bf561_coreb_exit(void) 68static void __exit bf561_coreb_exit(void)
390{ 69{
391 device_remove_file(coreb_dev.this_device, &dev_attr_coreb_status);
392 misc_deregister(&coreb_dev); 70 misc_deregister(&coreb_dev);
393
394 release_mem_region(0xff610000, 0x4000);
395 release_mem_region(0xff600000, 0x4000);
396 release_mem_region(0xff500000, 0x8000);
397 release_mem_region(0xff400000, 0x8000);
398
399 free_dma(CH_MEM_STREAM2_DEST);
400 free_dma(CH_MEM_STREAM2_SRC);
401} 71}
402
403module_init(bf561_coreb_init);
404module_exit(bf561_coreb_exit); 72module_exit(bf561_coreb_exit);
405 73
406MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>"); 74MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index d0b0b3506440..dccd396cd931 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -6,7 +6,7 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9/* This file shoule be up to date with: 9/* This file should be up to date with:
10 * - Revision Q, 11/07/2008; ADSP-BF561 Blackfin Processor Anomaly List 10 * - Revision Q, 11/07/2008; ADSP-BF561 Blackfin Processor Anomaly List
11 */ 11 */
12 12
@@ -18,11 +18,11 @@
18# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4 18# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4
19#endif 19#endif
20 20
21/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot 2 Not Supported */ 21/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
22#define ANOMALY_05000074 (1) 22#define ANOMALY_05000074 (1)
23/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */ 23/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
24#define ANOMALY_05000099 (__SILICON_REVISION__ < 5) 24#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
25/* Trace Buffers may contain errors in emulation mode and/or exception, NMI, reset handlers */ 25/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
26#define ANOMALY_05000116 (__SILICON_REVISION__ < 3) 26#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
27/* Testset instructions restricted to 32-bit aligned memory locations */ 27/* Testset instructions restricted to 32-bit aligned memory locations */
28#define ANOMALY_05000120 (1) 28#define ANOMALY_05000120 (1)
@@ -40,7 +40,7 @@
40#define ANOMALY_05000136 (__SILICON_REVISION__ < 3) 40#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
41/* Allowing the SPORT RX FIFO to fill will cause an overflow */ 41/* Allowing the SPORT RX FIFO to fill will cause an overflow */
42#define ANOMALY_05000140 (__SILICON_REVISION__ < 3) 42#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
43/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */ 43/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
44#define ANOMALY_05000141 (__SILICON_REVISION__ < 3) 44#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
45/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */ 45/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
46#define ANOMALY_05000142 (__SILICON_REVISION__ < 3) 46#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -80,7 +80,7 @@
80#define ANOMALY_05000163 (__SILICON_REVISION__ < 3) 80#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
81/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */ 81/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
82#define ANOMALY_05000166 (1) 82#define ANOMALY_05000166 (1)
83/* Turning Serial Ports on with External Frame Syncs */ 83/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
84#define ANOMALY_05000167 (1) 84#define ANOMALY_05000167 (1)
85/* SDRAM auto-refresh and subsequent Power Ups */ 85/* SDRAM auto-refresh and subsequent Power Ups */
86#define ANOMALY_05000168 (__SILICON_REVISION__ < 5) 86#define ANOMALY_05000168 (__SILICON_REVISION__ < 5)
@@ -164,7 +164,7 @@
164#define ANOMALY_05000242 (__SILICON_REVISION__ < 5) 164#define ANOMALY_05000242 (__SILICON_REVISION__ < 5)
165/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */ 165/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
166#define ANOMALY_05000244 (__SILICON_REVISION__ < 5) 166#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
167/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */ 167/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
168#define ANOMALY_05000245 (__SILICON_REVISION__ < 5) 168#define ANOMALY_05000245 (__SILICON_REVISION__ < 5)
169/* TESTSET operation forces stall on the other core */ 169/* TESTSET operation forces stall on the other core */
170#define ANOMALY_05000248 (__SILICON_REVISION__ < 5) 170#define ANOMALY_05000248 (__SILICON_REVISION__ < 5)
@@ -208,7 +208,7 @@
208#define ANOMALY_05000275 (__SILICON_REVISION__ > 2) 208#define ANOMALY_05000275 (__SILICON_REVISION__ > 2)
209/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */ 209/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */
210#define ANOMALY_05000276 (__SILICON_REVISION__ < 5) 210#define ANOMALY_05000276 (__SILICON_REVISION__ < 5)
211/* Writes to an I/O data register one SCLK cycle after an edge is detected may clear interrupt */ 211/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
212#define ANOMALY_05000277 (__SILICON_REVISION__ < 3) 212#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
213/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 213/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
214#define ANOMALY_05000278 (__SILICON_REVISION__ < 5) 214#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
@@ -232,7 +232,7 @@
232#define ANOMALY_05000310 (1) 232#define ANOMALY_05000310 (1)
233/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */ 233/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
234#define ANOMALY_05000312 (1) 234#define ANOMALY_05000312 (1)
235/* PPI Is Level-Sensitive on First Transfer */ 235/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
236#define ANOMALY_05000313 (1) 236#define ANOMALY_05000313 (1)
237/* Killed System MMR Write Completes Erroneously On Next System MMR Access */ 237/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
238#define ANOMALY_05000315 (1) 238#define ANOMALY_05000315 (1)
@@ -276,18 +276,27 @@
276#define ANOMALY_05000428 (__SILICON_REVISION__ > 3) 276#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
277/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 277/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
278#define ANOMALY_05000443 (1) 278#define ANOMALY_05000443 (1)
279/* False Hardware Error when RETI points to invalid memory */
280#define ANOMALY_05000461 (1)
279 281
280/* Anomalies that don't exist on this proc */ 282/* Anomalies that don't exist on this proc */
283#define ANOMALY_05000119 (0)
281#define ANOMALY_05000158 (0) 284#define ANOMALY_05000158 (0)
282#define ANOMALY_05000183 (0) 285#define ANOMALY_05000183 (0)
286#define ANOMALY_05000233 (0)
283#define ANOMALY_05000273 (0) 287#define ANOMALY_05000273 (0)
284#define ANOMALY_05000311 (0) 288#define ANOMALY_05000311 (0)
285#define ANOMALY_05000353 (1) 289#define ANOMALY_05000353 (1)
286#define ANOMALY_05000380 (0) 290#define ANOMALY_05000380 (0)
287#define ANOMALY_05000386 (1) 291#define ANOMALY_05000386 (1)
292#define ANOMALY_05000389 (0)
293#define ANOMALY_05000400 (0)
294#define ANOMALY_05000430 (0)
288#define ANOMALY_05000432 (0) 295#define ANOMALY_05000432 (0)
289#define ANOMALY_05000435 (0) 296#define ANOMALY_05000435 (0)
290#define ANOMALY_05000447 (0) 297#define ANOMALY_05000447 (0)
291#define ANOMALY_05000448 (0) 298#define ANOMALY_05000448 (0)
299#define ANOMALY_05000456 (0)
300#define ANOMALY_05000450 (0)
292 301
293#endif 302#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index 95d609f11c97..9d9858c2be68 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -1526,6 +1526,35 @@
1526#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA1_D0_START_ADDR() 1526#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA1_D0_START_ADDR()
1527#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val) 1527#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
1528 1528
1529#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA1_S1_CONFIG()
1530#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA1_S1_CONFIG(val)
1531#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA1_S1_IRQ_STATUS()
1532#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA1_S1_IRQ_STATUS(val)
1533#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA1_S1_X_MODIFY()
1534#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA1_S1_X_MODIFY(val)
1535#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA1_S1_Y_MODIFY()
1536#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA1_S1_Y_MODIFY(val)
1537#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA1_S1_X_COUNT()
1538#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA1_S1_X_COUNT(val)
1539#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA1_S1_Y_COUNT()
1540#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA1_S1_Y_COUNT(val)
1541#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA1_S1_START_ADDR()
1542#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA1_S1_START_ADDR(val)
1543#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA1_D1_CONFIG()
1544#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA1_D1_CONFIG(val)
1545#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA1_D1_IRQ_STATUS()
1546#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA1_D1_IRQ_STATUS(val)
1547#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA1_D1_X_MODIFY()
1548#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA1_D1_X_MODIFY(val)
1549#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA1_D1_Y_MODIFY()
1550#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA1_D1_Y_MODIFY(val)
1551#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA1_D1_X_COUNT()
1552#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA1_D1_X_COUNT(val)
1553#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA1_D1_Y_COUNT()
1554#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA1_D1_Y_COUNT(val)
1555#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA1_D1_START_ADDR()
1556#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA1_D1_START_ADDR(val)
1557
1529/* These need to be last due to the cdef/linux inter-dependencies */ 1558/* These need to be last due to the cdef/linux inter-dependencies */
1530#include <asm/irq.h> 1559#include <asm/irq.h>
1531 1560
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index cf922295f4ce..5fc0f05026e0 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -796,6 +796,62 @@
796#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */ 796#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */
797#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */ 797#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */
798 798
799#define MDMA_D0_NEXT_DESC_PTR MDMA1_D0_NEXT_DESC_PTR
800#define MDMA_D0_START_ADDR MDMA1_D0_START_ADDR
801#define MDMA_D0_CONFIG MDMA1_D0_CONFIG
802#define MDMA_D0_X_COUNT MDMA1_D0_X_COUNT
803#define MDMA_D0_X_MODIFY MDMA1_D0_X_MODIFY
804#define MDMA_D0_Y_COUNT MDMA1_D0_Y_COUNT
805#define MDMA_D0_Y_MODIFY MDMA1_D0_Y_MODIFY
806#define MDMA_D0_CURR_DESC_PTR MDMA1_D0_CURR_DESC_PTR
807#define MDMA_D0_CURR_ADDR MDMA1_D0_CURR_ADDR
808#define MDMA_D0_IRQ_STATUS MDMA1_D0_IRQ_STATUS
809#define MDMA_D0_PERIPHERAL_MAP MDMA1_D0_PERIPHERAL_MAP
810#define MDMA_D0_CURR_X_COUNT MDMA1_D0_CURR_X_COUNT
811#define MDMA_D0_CURR_Y_COUNT MDMA1_D0_CURR_Y_COUNT
812
813#define MDMA_S0_NEXT_DESC_PTR MDMA1_S0_NEXT_DESC_PTR
814#define MDMA_S0_START_ADDR MDMA1_S0_START_ADDR
815#define MDMA_S0_CONFIG MDMA1_S0_CONFIG
816#define MDMA_S0_X_COUNT MDMA1_S0_X_COUNT
817#define MDMA_S0_X_MODIFY MDMA1_S0_X_MODIFY
818#define MDMA_S0_Y_COUNT MDMA1_S0_Y_COUNT
819#define MDMA_S0_Y_MODIFY MDMA1_S0_Y_MODIFY
820#define MDMA_S0_CURR_DESC_PTR MDMA1_S0_CURR_DESC_PTR
821#define MDMA_S0_CURR_ADDR MDMA1_S0_CURR_ADDR
822#define MDMA_S0_IRQ_STATUS MDMA1_S0_IRQ_STATUS
823#define MDMA_S0_PERIPHERAL_MAP MDMA1_S0_PERIPHERAL_MAP
824#define MDMA_S0_CURR_X_COUNT MDMA1_S0_CURR_X_COUNT
825#define MDMA_S0_CURR_Y_COUNT MDMA1_S0_CURR_Y_COUNT
826
827#define MDMA_D1_NEXT_DESC_PTR MDMA1_D1_NEXT_DESC_PTR
828#define MDMA_D1_START_ADDR MDMA1_D1_START_ADDR
829#define MDMA_D1_CONFIG MDMA1_D1_CONFIG
830#define MDMA_D1_X_COUNT MDMA1_D1_X_COUNT
831#define MDMA_D1_X_MODIFY MDMA1_D1_X_MODIFY
832#define MDMA_D1_Y_COUNT MDMA1_D1_Y_COUNT
833#define MDMA_D1_Y_MODIFY MDMA1_D1_Y_MODIFY
834#define MDMA_D1_CURR_DESC_PTR MDMA1_D1_CURR_DESC_PTR
835#define MDMA_D1_CURR_ADDR MDMA1_D1_CURR_ADDR
836#define MDMA_D1_IRQ_STATUS MDMA1_D1_IRQ_STATUS
837#define MDMA_D1_PERIPHERAL_MAP MDMA1_D1_PERIPHERAL_MAP
838#define MDMA_D1_CURR_X_COUNT MDMA1_D1_CURR_X_COUNT
839#define MDMA_D1_CURR_Y_COUNT MDMA1_D1_CURR_Y_COUNT
840
841#define MDMA_S1_NEXT_DESC_PTR MDMA1_S1_NEXT_DESC_PTR
842#define MDMA_S1_START_ADDR MDMA1_S1_START_ADDR
843#define MDMA_S1_CONFIG MDMA1_S1_CONFIG
844#define MDMA_S1_X_COUNT MDMA1_S1_X_COUNT
845#define MDMA_S1_X_MODIFY MDMA1_S1_X_MODIFY
846#define MDMA_S1_Y_COUNT MDMA1_S1_Y_COUNT
847#define MDMA_S1_Y_MODIFY MDMA1_S1_Y_MODIFY
848#define MDMA_S1_CURR_DESC_PTR MDMA1_S1_CURR_DESC_PTR
849#define MDMA_S1_CURR_ADDR MDMA1_S1_CURR_ADDR
850#define MDMA_S1_IRQ_STATUS MDMA1_S1_IRQ_STATUS
851#define MDMA_S1_PERIPHERAL_MAP MDMA1_S1_PERIPHERAL_MAP
852#define MDMA_S1_CURR_X_COUNT MDMA1_S1_CURR_X_COUNT
853#define MDMA_S1_CURR_Y_COUNT MDMA1_S1_CURR_Y_COUNT
854
799/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */ 855/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
800#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */ 856#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */
801#define IMDMA_D0_NEXT_DESC_PTR 0xFFC01800 /*IMDMA Stream 0 Destination Next Descriptor Ptr Reg */ 857#define IMDMA_D0_NEXT_DESC_PTR 0xFFC01800 /*IMDMA Stream 0 Destination Next Descriptor Ptr Reg */
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 9b27e698c0b2..8c10701c251f 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -133,9 +133,9 @@ void __init platform_request_ipi(irq_handler_t handler)
133 int ret; 133 int ret;
134 134
135 ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED, 135 ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
136 "SMP interrupt", handler); 136 "Supplemental Interrupt0", handler);
137 if (ret) 137 if (ret)
138 panic("Cannot request supplemental interrupt 0 for IPI service\n"); 138 panic("Cannot request supplemental interrupt 0 for IPI service");
139} 139}
140 140
141void platform_send_ipi(cpumask_t callmap) 141void platform_send_ipi(cpumask_t callmap)
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 80d39b2f9db2..da93d9207165 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -71,3 +71,10 @@
71#if ANOMALY_05000448 71#if ANOMALY_05000448
72# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes. 72# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
73#endif 73#endif
74
75/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
76#if ANOMALY_05000220 && \
77 ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
78 (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
79# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
80#endif
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index aa0648c6a9fe..d9666fe6c3d6 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -15,6 +15,13 @@
15 15
16.text 16.text
17 17
18/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
19#if ANOMALY_05000443
20# define BROK_FLUSH_INST "IFLUSH"
21#else
22# define BROK_FLUSH_INST "no anomaly! yeah!"
23#endif
24
18/* Since all L1 caches work the same way, we use the same method for flushing 25/* Since all L1 caches work the same way, we use the same method for flushing
19 * them. Only the actual flush instruction differs. We write this in asm as 26 * them. Only the actual flush instruction differs. We write this in asm as
20 * GCC can be hard to coax into writing nice hardware loops. 27 * GCC can be hard to coax into writing nice hardware loops.
@@ -23,7 +30,7 @@
23 * R0 = start address 30 * R0 = start address
24 * R1 = end address 31 * R1 = end address
25 */ 32 */
26.macro do_flush flushins:req optflushins optnopins label 33.macro do_flush flushins:req label
27 34
28 R2 = -L1_CACHE_BYTES; 35 R2 = -L1_CACHE_BYTES;
29 36
@@ -44,22 +51,15 @@
44\label : 51\label :
45.endif 52.endif
46 P0 = R0; 53 P0 = R0;
54
47 LSETUP (1f, 2f) LC1 = P1; 55 LSETUP (1f, 2f) LC1 = P1;
481: 561:
49.ifnb \optflushins 57.ifeqs "\flushins", BROK_FLUSH_INST
50 \optflushins [P0];
51.endif
52#if ANOMALY_05000443
53.ifb \optnopins
542:
55.endif
56 \flushins [P0++]; 58 \flushins [P0++];
57.ifnb \optnopins 592: nop;
582: \optnopins; 60.else
59.endif
60#else
612: \flushins [P0++]; 612: \flushins [P0++];
62#endif 62.endif
63 63
64 RTS; 64 RTS;
65.endm 65.endm
@@ -77,25 +77,9 @@ ENTRY(_blackfin_icache_flush_range)
77 */ 77 */
78 P0 = R0; 78 P0 = R0;
79 IFLUSH[P0]; 79 IFLUSH[P0];
80 do_flush IFLUSH, , nop 80 do_flush IFLUSH
81ENDPROC(_blackfin_icache_flush_range) 81ENDPROC(_blackfin_icache_flush_range)
82 82
83/* Flush all cache lines assocoiated with this area of memory. */
84ENTRY(_blackfin_icache_dcache_flush_range)
85/*
86 * Walkaround to avoid loading wrong instruction after invalidating icache
87 * and following sequence is met.
88 *
89 * 1) One instruction address is cached in the instruction cache.
90 * 2) This instruction in SDRAM is changed.
91 * 3) IFLASH[P0] is executed only once in blackfin_icache_flush_range().
92 * 4) This instruction is executed again, but the old one is loaded.
93 */
94 P0 = R0;
95 IFLUSH[P0];
96 do_flush FLUSH, IFLUSH
97ENDPROC(_blackfin_icache_dcache_flush_range)
98
99/* Throw away all D-cached data in specified region without any obligation to 83/* Throw away all D-cached data in specified region without any obligation to
100 * write them back. Since the Blackfin ISA does not have an "invalidate" 84 * write them back. Since the Blackfin ISA does not have an "invalidate"
101 * instruction, we use flush/invalidate. Perhaps as a speed optimization we 85 * instruction, we use flush/invalidate. Perhaps as a speed optimization we
@@ -107,7 +91,7 @@ ENDPROC(_blackfin_dcache_invalidate_range)
107 91
108/* Flush all data cache lines assocoiated with this memory area */ 92/* Flush all data cache lines assocoiated with this memory area */
109ENTRY(_blackfin_dcache_flush_range) 93ENTRY(_blackfin_dcache_flush_range)
110 do_flush FLUSH, , , .Ldfr 94 do_flush FLUSH, .Ldfr
111ENDPROC(_blackfin_dcache_flush_range) 95ENDPROC(_blackfin_dcache_flush_range)
112 96
113/* Our headers convert the page structure to an address, so just need to flush 97/* Our headers convert the page structure to an address, so just need to flush
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 35393651359b..ef6870e9eea6 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -72,6 +72,7 @@ void init_clocks(void)
72#endif 72#endif
73 bfin_write_PLL_LOCKCNT(0x300); 73 bfin_write_PLL_LOCKCNT(0x300);
74 do_sync(); 74 do_sync();
75 /* We always write PLL_CTL thus avoiding Anomaly 05000242 */
75 bfin_write16(PLL_CTL, PLL_CTL_VAL); 76 bfin_write16(PLL_CTL, PLL_CTL_VAL);
76 __asm__ __volatile__("IDLE;"); 77 __asm__ __volatile__("IDLE;");
77 bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV); 78 bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 72e16605ca09..70e3411f558c 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -140,7 +140,8 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
140 cclk = get_cclk() / 1000; 140 cclk = get_cclk() / 1000;
141 sclk = get_sclk() / 1000; 141 sclk = get_sclk() / 1000;
142 142
143#if ANOMALY_05000273 || (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE)) 143#if ANOMALY_05000273 || ANOMALY_05000274 || \
144 (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
144 min_cclk = sclk * 2; 145 min_cclk = sclk * 2;
145#else 146#else
146 min_cclk = sclk; 147 min_cclk = sclk;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a063a434f7e3..da0558ad1b1a 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,7 +36,6 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/linkage.h> 37#include <linux/linkage.h>
38#include <linux/unistd.h> 38#include <linux/unistd.h>
39#include <linux/threads.h>
40#include <asm/blackfin.h> 39#include <asm/blackfin.h>
41#include <asm/errno.h> 40#include <asm/errno.h>
42#include <asm/fixed_code.h> 41#include <asm/fixed_code.h>
@@ -201,7 +200,18 @@ ENTRY(_ex_single_step)
201 cc = r7 == 0; 200 cc = r7 == 0;
202 if !cc jump 1f; 201 if !cc jump 1f;
203#endif 202#endif
204 203#ifdef CONFIG_EXACT_HWERR
204 /* Read the ILAT, and to check to see if the process we are
205 * single stepping caused a previous hardware error
206 * If so, do not single step, (which lowers to IRQ5, and makes
207 * us miss the error).
208 */
209 p5.l = lo(ILAT);
210 p5.h = hi(ILAT);
211 r7 = [p5];
212 cc = bittst(r7, EVT_IVHW_P);
213 if cc jump 1f;
214#endif
205 /* Single stepping only a single instruction, so clear the trace 215 /* Single stepping only a single instruction, so clear the trace
206 * bit here. */ 216 * bit here. */
207 r7 = syscfg; 217 r7 = syscfg;
@@ -263,15 +273,6 @@ ENTRY(_bfin_return_from_exception)
263 r6 = 0x25; 273 r6 = 0x25;
264 CC = R7 == R6; 274 CC = R7 == R6;
265 if CC JUMP _double_fault; 275 if CC JUMP _double_fault;
266
267 /* Did we cause a HW error? */
268 p5.l = lo(ILAT);
269 p5.h = hi(ILAT);
270 r6 = [p5];
271 r7 = 0x20; /* Did I just cause anther HW error? */
272 r6 = r7 & r6;
273 CC = R7 == R6;
274 if CC JUMP _double_fault;
275#endif 276#endif
276 277
277 (R7:6,P5:4) = [sp++]; 278 (R7:6,P5:4) = [sp++];
@@ -473,6 +474,16 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
473 [--sp] = ASTAT; 474 [--sp] = ASTAT;
474 [--sp] = (R7:6,P5:4); 475 [--sp] = (R7:6,P5:4);
475 476
477#ifdef CONFIG_EXACT_HWERR
478 /* Make sure all pending read/writes complete. This will ensure any
479 * accesses which could cause hardware errors completes, and signal
480 * the the hardware before we do something silly, like crash the
481 * kernel. We don't need to work around anomaly 05000312, since
482 * we are already atomic
483 */
484 ssync;
485#endif
486
476#if ANOMALY_05000283 || ANOMALY_05000315 487#if ANOMALY_05000283 || ANOMALY_05000315
477 cc = r7 == r7; 488 cc = r7 == r7;
478 p5.h = HI(CHIPID); 489 p5.h = HI(CHIPID);
@@ -855,7 +866,7 @@ ENTRY(_ret_from_exception)
855 p1.h = _schedule_and_signal; 866 p1.h = _schedule_and_signal;
856 [p0] = p1; 867 [p0] = p1;
857 csync; 868 csync;
858 raise 15; /* raise evt14 to do signal or reschedule */ 869 raise 15; /* raise evt15 to do signal or reschedule */
8594: 8704:
860 r0 = syscfg; 871 r0 = syscfg;
861 bitclr(r0, 0); 872 bitclr(r0, 0);
@@ -916,7 +927,7 @@ ENTRY(_return_from_int)
916 p1.h = _schedule_and_signal_from_int; 927 p1.h = _schedule_and_signal_from_int;
917 [p0] = p1; 928 [p0] = p1;
918 csync; 929 csync;
919#if ANOMALY_05000281 930#if ANOMALY_05000281 || ANOMALY_05000461
920 r0.l = lo(SAFE_USER_INSTRUCTION); 931 r0.l = lo(SAFE_USER_INSTRUCTION);
921 r0.h = hi(SAFE_USER_INSTRUCTION); 932 r0.h = hi(SAFE_USER_INSTRUCTION);
922 reti = r0; 933 reti = r0;
@@ -930,18 +941,27 @@ ENTRY(_return_from_int)
930ENDPROC(_return_from_int) 941ENDPROC(_return_from_int)
931 942
932ENTRY(_lower_to_irq14) 943ENTRY(_lower_to_irq14)
933#if ANOMALY_05000281 944#if ANOMALY_05000281 || ANOMALY_05000461
934 r0.l = lo(SAFE_USER_INSTRUCTION); 945 r0.l = lo(SAFE_USER_INSTRUCTION);
935 r0.h = hi(SAFE_USER_INSTRUCTION); 946 r0.h = hi(SAFE_USER_INSTRUCTION);
936 reti = r0; 947 reti = r0;
937#endif 948#endif
938 r0 = 0x401f; 949
950#ifdef CONFIG_DEBUG_HWERR
951 /* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
952 r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
953#else
954 /* Only enable irq14 interrupt, until we transition to _evt14_softirq */
955 r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
956#endif
939 sti r0; 957 sti r0;
940 raise 14; 958 raise 14;
941 rti; 959 rti;
960ENDPROC(_lower_to_irq14)
961
942ENTRY(_evt14_softirq) 962ENTRY(_evt14_softirq)
943#ifdef CONFIG_DEBUG_HWERR 963#ifdef CONFIG_DEBUG_HWERR
944 r0 = 0x3f; 964 r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
945 sti r0; 965 sti r0;
946#else 966#else
947 cli r0; 967 cli r0;
@@ -949,8 +969,9 @@ ENTRY(_evt14_softirq)
949 [--sp] = RETI; 969 [--sp] = RETI;
950 SP += 4; 970 SP += 4;
951 rts; 971 rts;
972ENDPROC(_evt14_softirq)
952 973
953_schedule_and_signal_from_int: 974ENTRY(_schedule_and_signal_from_int)
954 /* To end up here, vector 15 was changed - so we have to change it 975 /* To end up here, vector 15 was changed - so we have to change it
955 * back. 976 * back.
956 */ 977 */
@@ -983,8 +1004,9 @@ _schedule_and_signal_from_int:
983 call _finish_atomic_sections; 1004 call _finish_atomic_sections;
984 sp += 12; 1005 sp += 12;
985 jump.s .Lresume_userspace; 1006 jump.s .Lresume_userspace;
1007ENDPROC(_schedule_and_signal_from_int)
986 1008
987_schedule_and_signal: 1009ENTRY(_schedule_and_signal)
988 SAVE_CONTEXT_SYSCALL 1010 SAVE_CONTEXT_SYSCALL
989 /* To end up here, vector 15 was changed - so we have to change it 1011 /* To end up here, vector 15 was changed - so we have to change it
990 * back. 1012 * back.
@@ -1002,7 +1024,7 @@ _schedule_and_signal:
10021: 10241:
1003 RESTORE_CONTEXT 1025 RESTORE_CONTEXT
1004 rti; 1026 rti;
1005ENDPROC(_lower_to_irq14) 1027ENDPROC(_schedule_and_signal)
1006 1028
1007/* We handle this 100% in exception space - to reduce overhead 1029/* We handle this 100% in exception space - to reduce overhead
1008 * Only potiential problem is if the software buffer gets swapped out of the 1030 * Only potiential problem is if the software buffer gets swapped out of the
@@ -1588,19 +1610,3 @@ ENTRY(_sys_call_table)
1588 .long _sys_ni_syscall 1610 .long _sys_ni_syscall
1589 .endr 1611 .endr
1590END(_sys_call_table) 1612END(_sys_call_table)
1591
1592#ifdef CONFIG_EXCEPTION_L1_SCRATCH
1593/* .section .l1.bss.scratch */
1594.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
1595#else
1596#ifdef CONFIG_SYSCALL_TAB_L1
1597.section .l1.bss
1598#else
1599.bss
1600#endif
1601ENTRY(_exception_stack)
1602 .rept 1024 * NR_CPUS
1603 .long 0
1604 .endr
1605_exception_stack_top:
1606#endif
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 698d4c05947e..f826f6b9f917 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -30,8 +30,6 @@ ENTRY(__init_clear_bss)
30 rts; 30 rts;
31ENDPROC(__init_clear_bss) 31ENDPROC(__init_clear_bss)
32 32
33#define INITIAL_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
34
35ENTRY(__start) 33ENTRY(__start)
36 /* R0: argument of command line string, passed from uboot, save it */ 34 /* R0: argument of command line string, passed from uboot, save it */
37 R7 = R0; 35 R7 = R0;
@@ -126,30 +124,30 @@ ENTRY(__start)
126 * below 124 * below
127 */ 125 */
128 GET_PDA(p0, r0); 126 GET_PDA(p0, r0);
129 r7 = [p0 + PDA_RETX]; 127 r6 = [p0 + PDA_RETX];
130 p1.l = _init_saved_retx; 128 p1.l = _init_saved_retx;
131 p1.h = _init_saved_retx; 129 p1.h = _init_saved_retx;
132 [p1] = r7; 130 [p1] = r6;
133 131
134 r7 = [p0 + PDA_DCPLB]; 132 r6 = [p0 + PDA_DCPLB];
135 p1.l = _init_saved_dcplb_fault_addr; 133 p1.l = _init_saved_dcplb_fault_addr;
136 p1.h = _init_saved_dcplb_fault_addr; 134 p1.h = _init_saved_dcplb_fault_addr;
137 [p1] = r7; 135 [p1] = r6;
138 136
139 r7 = [p0 + PDA_ICPLB]; 137 r6 = [p0 + PDA_ICPLB];
140 p1.l = _init_saved_icplb_fault_addr; 138 p1.l = _init_saved_icplb_fault_addr;
141 p1.h = _init_saved_icplb_fault_addr; 139 p1.h = _init_saved_icplb_fault_addr;
142 [p1] = r7; 140 [p1] = r6;
143 141
144 r7 = [p0 + PDA_SEQSTAT]; 142 r6 = [p0 + PDA_SEQSTAT];
145 p1.l = _init_saved_seqstat; 143 p1.l = _init_saved_seqstat;
146 p1.h = _init_saved_seqstat; 144 p1.h = _init_saved_seqstat;
147 [p1] = r7; 145 [p1] = r6;
148#endif 146#endif
149 147
150 /* Initialize stack pointer */ 148 /* Initialize stack pointer */
151 sp.l = lo(INITIAL_STACK); 149 sp.l = _init_thread_union;
152 sp.h = hi(INITIAL_STACK); 150 sp.h = _init_thread_union;
153 fp = sp; 151 fp = sp;
154 usp = sp; 152 usp = sp;
155 153
@@ -189,7 +187,15 @@ ENTRY(__start)
189 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ 187 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
190 call _bfin_relocate_l1_mem; 188 call _bfin_relocate_l1_mem;
191#ifdef CONFIG_BFIN_KERNEL_CLOCK 189#ifdef CONFIG_BFIN_KERNEL_CLOCK
190 /* Only use on-chip scratch space for stack when absolutely required
191 * to avoid Anomaly 05000227 ... we know the init_clocks() func only
192 * uses L1 text and stack space and no other memory region.
193 */
194# define KERNEL_CLOCK_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
195 sp.l = lo(KERNEL_CLOCK_STACK);
196 sp.h = hi(KERNEL_CLOCK_STACK);
192 call _init_clocks; 197 call _init_clocks;
198 sp = usp; /* usp hasnt been touched, so restore from there */
193#endif 199#endif
194 200
195 /* This section keeps the processor in supervisor mode 201 /* This section keeps the processor in supervisor mode
@@ -243,9 +249,7 @@ ENTRY(_real_start)
243 call _cmdline_init; 249 call _cmdline_init;
244 250
245 /* Load the current thread pointer and stack */ 251 /* Load the current thread pointer and stack */
246 sp.l = _init_thread_union; 252 p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */
247 sp.h = _init_thread_union;
248 p1 = THREAD_SIZE (z);
249 sp = sp + p1; 253 sp = sp + p1;
250 usp = sp; 254 usp = sp;
251 fp = sp; 255 fp = sp;
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 0069c2dd4625..9c46680186e4 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -145,6 +145,14 @@ __common_int_entry:
145 145
146/* interrupt routine for ivhw - 5 */ 146/* interrupt routine for ivhw - 5 */
147ENTRY(_evt_ivhw) 147ENTRY(_evt_ivhw)
148 /* In case a single action kicks off multiple memory transactions, (like
149 * a cache line fetch, - this can cause multiple hardware errors, let's
150 * catch them all. First - make sure all the actions are complete, and
151 * the core sees the hardware errors.
152 */
153 SSYNC;
154 SSYNC;
155
148 SAVE_ALL_SYS 156 SAVE_ALL_SYS
149#ifdef CONFIG_FRAME_POINTER 157#ifdef CONFIG_FRAME_POINTER
150 fp = 0; 158 fp = 0;
@@ -159,6 +167,25 @@ ENTRY(_evt_ivhw)
1591: 1671:
160#endif 168#endif
161 169
170 /* Handle all stacked hardware errors
171 * To make sure we don't hang forever, only do it 10 times
172 */
173 R0 = 0;
174 R2 = 10;
1751:
176 P0.L = LO(ILAT);
177 P0.H = HI(ILAT);
178 R1 = [P0];
179 CC = BITTST(R1, EVT_IVHW_P);
180 IF ! CC JUMP 2f;
181 /* OK a hardware error is pending - clear it */
182 R1 = EVT_IVHW_P;
183 [P0] = R1;
184 R0 += 1;
185 CC = R1 == R2;
186 if CC JUMP 2f;
187 JUMP 1b;
1882:
162 # We are going to dump something out, so make sure we print IPEND properly 189 # We are going to dump something out, so make sure we print IPEND properly
163 p2.l = lo(IPEND); 190 p2.l = lo(IPEND);
164 p2.h = hi(IPEND); 191 p2.h = hi(IPEND);
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a7d7b2dd4059..351afd0e36d8 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1052,7 +1052,7 @@ int __init init_arch_irq(void)
1052 set_irq_chained_handler(irq, bfin_demux_error_irq); 1052 set_irq_chained_handler(irq, bfin_demux_error_irq);
1053 break; 1053 break;
1054#endif 1054#endif
1055#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 1055#if defined(CONFIG_TICKSOURCE_GPTMR0)
1056 case IRQ_TIMER0: 1056 case IRQ_TIMER0:
1057 set_irq_handler(irq, handle_percpu_irq); 1057 set_irq_handler(irq, handle_percpu_irq);
1058 break; 1058 break;
@@ -1116,6 +1116,9 @@ int __init init_arch_irq(void)
1116 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 1116 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1117 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 1117 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1118 1118
1119 /* This implicitly covers ANOMALY_05000171
1120 * Boot-ROM code modifies SICA_IWRx wakeup registers
1121 */
1119#ifdef SIC_IWR0 1122#ifdef SIC_IWR0
1120 bfin_write_SIC_IWR0(IWR_DISABLE_ALL); 1123 bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1121# ifdef SIC_IWR1 1124# ifdef SIC_IWR1
@@ -1136,13 +1139,6 @@ int __init init_arch_irq(void)
1136 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 1139 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1137#endif 1140#endif
1138 1141
1139#ifdef CONFIG_IPIPE
1140 for (irq = 0; irq < NR_IRQS; irq++) {
1141 struct irq_desc *desc = irq_to_desc(irq);
1142 desc->ic_prio = __ipipe_get_irq_priority(irq);
1143 }
1144#endif /* CONFIG_IPIPE */
1145
1146 return 0; 1142 return 0;
1147} 1143}
1148 1144
@@ -1156,23 +1152,22 @@ void do_irq(int vec, struct pt_regs *fp)
1156 } else { 1152 } else {
1157 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; 1153 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1158 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; 1154 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1159#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \ 1155#if defined(SIC_ISR0) || defined(SICA_ISR0)
1160 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1161 unsigned long sic_status[3]; 1156 unsigned long sic_status[3];
1162 1157
1163 if (smp_processor_id()) { 1158 if (smp_processor_id()) {
1164#ifdef CONFIG_SMP 1159# ifdef SICB_ISR0
1165 /* This will be optimized out in UP mode. */ 1160 /* This will be optimized out in UP mode. */
1166 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); 1161 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1167 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); 1162 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1168#endif 1163# endif
1169 } else { 1164 } else {
1170 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); 1165 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1171 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); 1166 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1172 } 1167 }
1173#ifdef CONFIG_BF54x 1168# ifdef SIC_ISR2
1174 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); 1169 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1175#endif 1170# endif
1176 for (;; ivg++) { 1171 for (;; ivg++) {
1177 if (ivg >= ivg_stop) { 1172 if (ivg >= ivg_stop) {
1178 atomic_inc(&num_spurious); 1173 atomic_inc(&num_spurious);
@@ -1236,20 +1231,16 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1236 1231
1237 if (likely(vec == EVT_IVTMR_P)) { 1232 if (likely(vec == EVT_IVTMR_P)) {
1238 irq = IRQ_CORETMR; 1233 irq = IRQ_CORETMR;
1239 goto core_tick;
1240 }
1241 1234
1242 SSYNC(); 1235 } else {
1243 1236#if defined(SIC_ISR0) || defined(SICA_ISR0)
1244#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
1245 {
1246 unsigned long sic_status[3]; 1237 unsigned long sic_status[3];
1247 1238
1248 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); 1239 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1249 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); 1240 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1250#ifdef CONFIG_BF54x 1241# ifdef SIC_ISR2
1251 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); 1242 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1252#endif 1243# endif
1253 for (;; ivg++) { 1244 for (;; ivg++) {
1254 if (ivg >= ivg_stop) { 1245 if (ivg >= ivg_stop) {
1255 atomic_inc(&num_spurious); 1246 atomic_inc(&num_spurious);
@@ -1258,9 +1249,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1258 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) 1249 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1259 break; 1250 break;
1260 } 1251 }
1261 }
1262#else 1252#else
1263 {
1264 unsigned long sic_status; 1253 unsigned long sic_status;
1265 1254
1266 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); 1255 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
@@ -1272,15 +1261,13 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1272 } else if (sic_status & ivg->isrflag) 1261 } else if (sic_status & ivg->isrflag)
1273 break; 1262 break;
1274 } 1263 }
1275 }
1276#endif 1264#endif
1277 1265
1278 irq = ivg->irqno; 1266 irq = ivg->irqno;
1267 }
1279 1268
1280 if (irq == IRQ_SYSTMR) { 1269 if (irq == IRQ_SYSTMR) {
1281#ifdef CONFIG_GENERIC_CLOCKEVENTS 1270#ifndef CONFIG_GENERIC_CLOCKEVENTS
1282core_tick:
1283#else
1284 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ 1271 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1285#endif 1272#endif
1286 /* This is basically what we need from the register frame. */ 1273 /* This is basically what we need from the register frame. */
@@ -1292,9 +1279,6 @@ core_tick:
1292 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10; 1279 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1293 } 1280 }
1294 1281
1295#ifndef CONFIG_GENERIC_CLOCKEVENTS
1296core_tick:
1297#endif
1298 if (this_domain == ipipe_root_domain) { 1282 if (this_domain == ipipe_root_domain) {
1299 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status); 1283 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1300 barrier(); 1284 barrier();
@@ -1312,7 +1296,7 @@ core_tick:
1312 } 1296 }
1313 } 1297 }
1314 1298
1315 return 0; 1299 return 0;
1316} 1300}
1317 1301
1318#endif /* CONFIG_IPIPE */ 1302#endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 93eab6146079..3b8ebaee77f2 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -43,8 +43,13 @@
43#include <asm/processor.h> 43#include <asm/processor.h>
44#include <asm/ptrace.h> 44#include <asm/ptrace.h>
45#include <asm/cpu.h> 45#include <asm/cpu.h>
46#include <asm/time.h>
46#include <linux/err.h> 47#include <linux/err.h>
47 48
49/*
50 * Anomaly notes:
51 * 05000120 - we always define corelock as 32-bit integer in L2
52 */
48struct corelock_slot corelock __attribute__ ((__section__(".l2.bss"))); 53struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
49 54
50void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb, 55void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
@@ -352,7 +357,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
352 357
353static void __cpuinit setup_secondary(unsigned int cpu) 358static void __cpuinit setup_secondary(unsigned int cpu)
354{ 359{
355#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)) 360#if !defined(CONFIG_TICKSOURCE_GPTMR0)
356 struct irq_desc *timer_desc; 361 struct irq_desc *timer_desc;
357#endif 362#endif
358 unsigned long ilat; 363 unsigned long ilat;
@@ -364,16 +369,13 @@ static void __cpuinit setup_secondary(unsigned int cpu)
364 bfin_write_ILAT(ilat); 369 bfin_write_ILAT(ilat);
365 CSYNC(); 370 CSYNC();
366 371
367 /* Reserve the PDA space for the secondary CPU. */
368 reserve_pda();
369
370 /* Enable interrupt levels IVG7-15. IARs have been already 372 /* Enable interrupt levels IVG7-15. IARs have been already
371 * programmed by the boot CPU. */ 373 * programmed by the boot CPU. */
372 bfin_irq_flags |= IMASK_IVG15 | 374 bfin_irq_flags |= IMASK_IVG15 |
373 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 375 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
374 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 376 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
375 377
376#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 378#if defined(CONFIG_TICKSOURCE_GPTMR0)
377 /* Power down the core timer, just to play safe. */ 379 /* Power down the core timer, just to play safe. */
378 bfin_write_TCNTL(0); 380 bfin_write_TCNTL(0);
379 381
diff --git a/arch/blackfin/mm/blackfin_sram.h b/arch/blackfin/mm/blackfin_sram.h
index 8cb0945563f9..bc0062884fde 100644
--- a/arch/blackfin/mm/blackfin_sram.h
+++ b/arch/blackfin/mm/blackfin_sram.h
@@ -30,7 +30,6 @@
30#ifndef __BLACKFIN_SRAM_H__ 30#ifndef __BLACKFIN_SRAM_H__
31#define __BLACKFIN_SRAM_H__ 31#define __BLACKFIN_SRAM_H__
32 32
33extern void bfin_sram_init(void);
34extern void *l1sram_alloc(size_t); 33extern void *l1sram_alloc(size_t);
35 34
36#endif 35#endif
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 9c3629b9a689..014a55abd09a 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -52,9 +52,14 @@ static unsigned long empty_bad_page_table;
52 52
53static unsigned long empty_bad_page; 53static unsigned long empty_bad_page;
54 54
55unsigned long empty_zero_page; 55static unsigned long empty_zero_page;
56 56
57extern unsigned long exception_stack[NR_CPUS][1024]; 57#ifndef CONFIG_EXCEPTION_L1_SCRATCH
58#if defined CONFIG_SYSCALL_TAB_L1
59__attribute__((l1_data))
60#endif
61static unsigned long exception_stack[NR_CPUS][1024];
62#endif
58 63
59struct blackfin_pda cpu_pda[NR_CPUS]; 64struct blackfin_pda cpu_pda[NR_CPUS];
60EXPORT_SYMBOL(cpu_pda); 65EXPORT_SYMBOL(cpu_pda);
@@ -117,19 +122,18 @@ asmlinkage void __init init_pda(void)
117 cpu_pda[0].next = &cpu_pda[1]; 122 cpu_pda[0].next = &cpu_pda[1];
118 cpu_pda[1].next = &cpu_pda[0]; 123 cpu_pda[1].next = &cpu_pda[0];
119 124
125#ifdef CONFIG_EXCEPTION_L1_SCRATCH
126 cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
127 L1_SCRATCH_LENGTH);
128#else
120 cpu_pda[cpu].ex_stack = exception_stack[cpu + 1]; 129 cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
130#endif
121 131
122#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
123 cpu_pda[cpu].imask = 0x1f; 133 cpu_pda[cpu].imask = 0x1f;
124#endif 134#endif
125} 135}
126 136
127void __cpuinit reserve_pda(void)
128{
129 printk(KERN_INFO "PDA for CPU%u reserved at %p\n", smp_processor_id(),
130 &cpu_pda[smp_processor_id()]);
131}
132
133void __init mem_init(void) 137void __init mem_init(void)
134{ 138{
135 unsigned int codek = 0, datak = 0, initk = 0; 139 unsigned int codek = 0, datak = 0, initk = 0;
@@ -171,19 +175,6 @@ void __init mem_init(void)
171 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); 175 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
172} 176}
173 177
174static int __init sram_init(void)
175{
176 /* Initialize the blackfin L1 Memory. */
177 bfin_sram_init();
178
179 /* Reserve the PDA space for the boot CPU right after we
180 * initialized the scratch memory allocator.
181 */
182 reserve_pda();
183 return 0;
184}
185pure_initcall(sram_init);
186
187static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end) 178static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
188{ 179{
189 unsigned long addr; 180 unsigned long addr;
diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c
index 22913e7a1818..c080e70f98b0 100644
--- a/arch/blackfin/mm/isram-driver.c
+++ b/arch/blackfin/mm/isram-driver.c
@@ -125,7 +125,7 @@ static bool isram_check_addr(const void *addr, size_t n)
125{ 125{
126 if ((addr >= (void *)L1_CODE_START) && 126 if ((addr >= (void *)L1_CODE_START) &&
127 (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) { 127 (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) {
128 if ((addr + n) >= (void *)(L1_CODE_START + L1_CODE_LENGTH)) { 128 if ((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH)) {
129 show_stack(NULL, NULL); 129 show_stack(NULL, NULL);
130 printk(KERN_ERR "isram_memcpy: copy involving %p length " 130 printk(KERN_ERR "isram_memcpy: copy involving %p length "
131 "(%zu) too long\n", addr, n); 131 "(%zu) too long\n", addr, n);
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 530d1393a232..0bc3c4ef0aad 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -83,6 +83,14 @@ static struct kmem_cache *sram_piece_cache;
83static void __init l1sram_init(void) 83static void __init l1sram_init(void)
84{ 84{
85 unsigned int cpu; 85 unsigned int cpu;
86 unsigned long reserve;
87
88#ifdef CONFIG_SMP
89 reserve = 0;
90#else
91 reserve = sizeof(struct l1_scratch_task_info);
92#endif
93
86 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 94 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
87 per_cpu(free_l1_ssram_head, cpu).next = 95 per_cpu(free_l1_ssram_head, cpu).next =
88 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 96 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
@@ -91,8 +99,8 @@ static void __init l1sram_init(void)
91 return; 99 return;
92 } 100 }
93 101
94 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu); 102 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
95 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH; 103 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
96 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; 104 per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
97 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; 105 per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
98 106
@@ -223,7 +231,7 @@ static void __init l2_sram_init(void)
223 spin_lock_init(&l2_sram_lock); 231 spin_lock_init(&l2_sram_lock);
224} 232}
225 233
226void __init bfin_sram_init(void) 234static int __init bfin_sram_init(void)
227{ 235{
228 sram_piece_cache = kmem_cache_create("sram_piece_cache", 236 sram_piece_cache = kmem_cache_create("sram_piece_cache",
229 sizeof(struct sram_piece), 237 sizeof(struct sram_piece),
@@ -233,7 +241,10 @@ void __init bfin_sram_init(void)
233 l1_data_sram_init(); 241 l1_data_sram_init();
234 l1_inst_sram_init(); 242 l1_inst_sram_init();
235 l2_sram_init(); 243 l2_sram_init();
244
245 return 0;
236} 246}
247pure_initcall(bfin_sram_init);
237 248
238/* SRAM allocate function */ 249/* SRAM allocate function */
239static void *_sram_alloc(size_t size, struct sram_piece *pfree_head, 250static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
@@ -732,6 +743,10 @@ found:
732} 743}
733EXPORT_SYMBOL(sram_free_with_lsl); 744EXPORT_SYMBOL(sram_free_with_lsl);
734 745
746/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
747 * tracked. These are designed for userspace so that when a process exits,
748 * we can safely reap their resources.
749 */
735void *sram_alloc_with_lsl(size_t size, unsigned long flags) 750void *sram_alloc_with_lsl(size_t size, unsigned long flags)
736{ 751{
737 void *addr = NULL; 752 void *addr = NULL;
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index a187833febc8..abc13e368b90 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -48,8 +48,6 @@ void *module_alloc(unsigned long size)
48void module_free(struct module *mod, void *module_region) 48void module_free(struct module *mod, void *module_region)
49{ 49{
50 FREE_MODULE(module_region); 50 FREE_MODULE(module_region);
51 /* FIXME: If module_region == mod->init_region, trim exception
52 table entries. */
53} 51}
54 52
55/* We don't need anything special. */ 53/* We don't need anything special. */
diff --git a/arch/frv/kernel/module.c b/arch/frv/kernel/module.c
index 850d168f69fc..711763c8a6f3 100644
--- a/arch/frv/kernel/module.c
+++ b/arch/frv/kernel/module.c
@@ -35,8 +35,6 @@ void *module_alloc(unsigned long size)
35void module_free(struct module *mod, void *module_region) 35void module_free(struct module *mod, void *module_region)
36{ 36{
37 vfree(module_region); 37 vfree(module_region);
38 /* FIXME: If module_region == mod->init_region, trim exception
39 table entries. */
40} 38}
41 39
42/* We don't need anything special. */ 40/* We don't need anything special. */
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
index cfc9127d2ced..0865e291c20d 100644
--- a/arch/h8300/kernel/module.c
+++ b/arch/h8300/kernel/module.c
@@ -23,8 +23,6 @@ void *module_alloc(unsigned long size)
23void module_free(struct module *mod, void *module_region) 23void module_free(struct module *mod, void *module_region)
24{ 24{
25 vfree(module_region); 25 vfree(module_region);
26 /* FIXME: If module_region == mod->init_region, trim exception
27 table entries. */
28} 26}
29 27
30/* We don't need anything special. */ 28/* We don't need anything special. */
diff --git a/arch/ia64/include/asm/suspend.h b/arch/ia64/include/asm/suspend.h
deleted file mode 100644
index b05bbb6074e2..000000000000
--- a/arch/ia64/include/asm/suspend.h
+++ /dev/null
@@ -1 +0,0 @@
1/* dummy (must be non-empty to prevent prejudicial removal...) */
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 71c50dd8f870..e95d5ad9285d 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -53,6 +53,32 @@ void sort_extable (struct exception_table_entry *start,
53 cmp_ex, swap_ex); 53 cmp_ex, swap_ex);
54} 54}
55 55
56static inline unsigned long ex_to_addr(const struct exception_table_entry *x)
57{
58 return (unsigned long)&x->insn + x->insn;
59}
60
61#ifdef CONFIG_MODULES
62/*
63 * Any entry referring to the module init will be at the beginning or
64 * the end.
65 */
66void trim_init_extable(struct module *m)
67{
68 /*trim the beginning*/
69 while (m->num_exentries &&
70 within_module_init(ex_to_addr(&m->extable[0]), m)) {
71 m->extable++;
72 m->num_exentries--;
73 }
74 /*trim the end*/
75 while (m->num_exentries &&
76 within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
77 m))
78 m->num_exentries--;
79}
80#endif /* CONFIG_MODULES */
81
56const struct exception_table_entry * 82const struct exception_table_entry *
57search_extable (const struct exception_table_entry *first, 83search_extable (const struct exception_table_entry *first,
58 const struct exception_table_entry *last, 84 const struct exception_table_entry *last,
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
index 8d4205794380..cb5f37d78d49 100644
--- a/arch/m32r/kernel/module.c
+++ b/arch/m32r/kernel/module.c
@@ -44,8 +44,6 @@ void *module_alloc(unsigned long size)
44void module_free(struct module *mod, void *module_region) 44void module_free(struct module *mod, void *module_region)
45{ 45{
46 vfree(module_region); 46 vfree(module_region);
47 /* FIXME: If module_region == mod->init_region, trim exception
48 table entries. */
49} 47}
50 48
51/* We don't need anything special. */ 49/* We don't need anything special. */
diff --git a/arch/m68k/include/asm/suspend.h b/arch/m68k/include/asm/suspend.h
deleted file mode 100644
index 57b3ddb4d269..000000000000
--- a/arch/m68k/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_SUSPEND_H
2#define _M68K_SUSPEND_H
3
4/* Dummy include. */
5
6#endif /* _M68K_SUSPEND_H */
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 774862bc6977..cd6bcb1c957e 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -31,8 +31,6 @@ void *module_alloc(unsigned long size)
31void module_free(struct module *mod, void *module_region) 31void module_free(struct module *mod, void *module_region)
32{ 32{
33 vfree(module_region); 33 vfree(module_region);
34 /* FIXME: If module_region == mod->init_region, trim exception
35 table entries. */
36} 34}
37 35
38/* We don't need anything special. */ 36/* We don't need anything special. */
diff --git a/arch/m68knommu/kernel/module.c b/arch/m68knommu/kernel/module.c
index 3b1a2ff61ddc..d11ffae7956a 100644
--- a/arch/m68knommu/kernel/module.c
+++ b/arch/m68knommu/kernel/module.c
@@ -23,8 +23,6 @@ void *module_alloc(unsigned long size)
23void module_free(struct module *mod, void *module_region) 23void module_free(struct module *mod, void *module_region)
24{ 24{
25 vfree(module_region); 25 vfree(module_region);
26 /* FIXME: If module_region == mod->init_region, trim exception
27 table entries. */
28} 26}
29 27
30/* We don't need anything special. */ 28/* We don't need anything special. */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8cc312b5d4dc..b50b845fdd50 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,6 +6,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
6config MICROBLAZE 6config MICROBLAZE
7 def_bool y 7 def_bool y
8 select HAVE_LMB 8 select HAVE_LMB
9 select ARCH_WANT_OPTIONAL_GPIOLIB
9 10
10config SWAP 11config SWAP
11 def_bool n 12 def_bool n
@@ -49,13 +50,14 @@ config GENERIC_CLOCKEVENTS
49config GENERIC_HARDIRQS_NO__DO_IRQ 50config GENERIC_HARDIRQS_NO__DO_IRQ
50 def_bool y 51 def_bool y
51 52
53config GENERIC_GPIO
54 def_bool y
55
52config PCI 56config PCI
53 depends on !MMU
54 def_bool n 57 def_bool n
55 58
56config NO_DMA 59config NO_DMA
57 depends on !MMU 60 def_bool y
58 def_bool n
59 61
60source "init/Kconfig" 62source "init/Kconfig"
61 63
@@ -72,7 +74,8 @@ source "kernel/Kconfig.preempt"
72source "kernel/Kconfig.hz" 74source "kernel/Kconfig.hz"
73 75
74config MMU 76config MMU
75 def_bool n 77 bool "MMU support"
78 default n
76 79
77config NO_MMU 80config NO_MMU
78 bool 81 bool
@@ -105,9 +108,6 @@ config CMDLINE_FORCE
105config OF 108config OF
106 def_bool y 109 def_bool y
107 110
108config OF_DEVICE
109 def_bool y
110
111config PROC_DEVICETREE 111config PROC_DEVICETREE
112 bool "Support for device tree in /proc" 112 bool "Support for device tree in /proc"
113 depends on PROC_FS 113 depends on PROC_FS
@@ -118,6 +118,113 @@ config PROC_DEVICETREE
118 118
119endmenu 119endmenu
120 120
121menu "Advanced setup"
122
123config ADVANCED_OPTIONS
124 bool "Prompt for advanced kernel configuration options"
125 depends on MMU
126 help
127 This option will enable prompting for a variety of advanced kernel
128 configuration options. These options can cause the kernel to not
129 work if they are set incorrectly, but can be used to optimize certain
130 aspects of kernel memory management.
131
132 Unless you know what you are doing, say N here.
133
134comment "Default settings for advanced configuration options are used"
135 depends on !ADVANCED_OPTIONS
136
137config HIGHMEM_START_BOOL
138 bool "Set high memory pool address"
139 depends on ADVANCED_OPTIONS && HIGHMEM
140 help
141 This option allows you to set the base address of the kernel virtual
142 area used to map high memory pages. This can be useful in
143 optimizing the layout of kernel virtual memory.
144
145 Say N here unless you know what you are doing.
146
147config HIGHMEM_START
148 hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
149 depends on MMU
150 default "0xfe000000"
151
152config LOWMEM_SIZE_BOOL
153 bool "Set maximum low memory"
154 depends on ADVANCED_OPTIONS
155 help
156 This option allows you to set the maximum amount of memory which
157 will be used as "low memory", that is, memory which the kernel can
158 access directly, without having to set up a kernel virtual mapping.
159 This can be useful in optimizing the layout of kernel virtual
160 memory.
161
162 Say N here unless you know what you are doing.
163
164config LOWMEM_SIZE
165 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
166 depends on MMU
167 default "0x30000000"
168
169config KERNEL_START_BOOL
170 bool "Set custom kernel base address"
171 depends on ADVANCED_OPTIONS
172 help
173 This option allows you to set the kernel virtual address at which
174 the kernel will map low memory (the kernel image will be linked at
175 this address). This can be useful in optimizing the virtual memory
176 layout of the system.
177
178 Say N here unless you know what you are doing.
179
180config KERNEL_START
181 hex "Virtual address of kernel base" if KERNEL_START_BOOL
182 default "0xc0000000" if MMU
183 default KERNEL_BASE_ADDR if !MMU
184
185config TASK_SIZE_BOOL
186 bool "Set custom user task size"
187 depends on ADVANCED_OPTIONS
188 help
189 This option allows you to set the amount of virtual address space
190 allocated to user tasks. This can be useful in optimizing the
191 virtual memory layout of the system.
192
193 Say N here unless you know what you are doing.
194
195config TASK_SIZE
196 hex "Size of user task space" if TASK_SIZE_BOOL
197 depends on MMU
198 default "0x80000000"
199
200config CONSISTENT_START_BOOL
201 bool "Set custom consistent memory pool address"
202 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
203 help
204 This option allows you to set the base virtual address
205 of the the consistent memory pool. This pool of virtual
206 memory is used to make consistent memory allocations.
207
208config CONSISTENT_START
209 hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
210 depends on MMU
211 default "0xff100000" if NOT_COHERENT_CACHE
212
213config CONSISTENT_SIZE_BOOL
214 bool "Set custom consistent memory pool size"
215 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
216 help
217 This option allows you to set the size of the the
218 consistent memory pool. This pool of virtual memory
219 is used to make consistent memory allocations.
220
221config CONSISTENT_SIZE
222 hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
223 depends on MMU
224 default "0x00200000" if NOT_COHERENT_CACHE
225
226endmenu
227
121source "mm/Kconfig" 228source "mm/Kconfig"
122 229
123menu "Exectuable file formats" 230menu "Exectuable file formats"
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index aaadfa701da3..d0bcf80a1136 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -1,4 +1,8 @@
1ifeq ($(CONFIG_MMU),y)
2UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
3else
1UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\" 4UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
5endif
2 6
3# What CPU vesion are we building for, and crack it open 7# What CPU vesion are we building for, and crack it open
4# as major.minor.rev 8# as major.minor.rev
@@ -36,6 +40,8 @@ CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
36# r31 holds current when in kernel mode 40# r31 holds current when in kernel mode
37CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) 41CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
38 42
43LDFLAGS :=
44LDFLAGS_vmlinux :=
39LDFLAGS_BLOB := --format binary --oformat elf32-microblaze 45LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
40 46
41LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name) 47LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 844edf406d34..c2bb043a029d 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -7,6 +7,8 @@ targets := linux.bin linux.bin.gz
7OBJCOPYFLAGS_linux.bin := -O binary 7OBJCOPYFLAGS_linux.bin := -O binary
8 8
9$(obj)/linux.bin: vmlinux FORCE 9$(obj)/linux.bin: vmlinux FORCE
10 [ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
11 touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
10 $(call if_changed,objcopy) 12 $(call if_changed,objcopy)
11 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 13 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
12 14
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
new file mode 100644
index 000000000000..bd0b85ec38f5
--- /dev/null
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -0,0 +1,798 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.30-rc6
4# Fri May 22 10:02:33 2009
5#
6CONFIG_MICROBLAZE=y
7# CONFIG_SWAP is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_ARCH_HAS_ILOG2_U32 is not set
10# CONFIG_ARCH_HAS_ILOG2_U64 is not set
11CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y
13CONFIG_GENERIC_HARDIRQS=y
14CONFIG_GENERIC_IRQ_PROBE=y
15CONFIG_GENERIC_CALIBRATE_DELAY=y
16CONFIG_GENERIC_TIME=y
17# CONFIG_GENERIC_TIME_VSYSCALL is not set
18CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
20CONFIG_GENERIC_GPIO=y
21CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
22
23#
24# General setup
25#
26CONFIG_EXPERIMENTAL=y
27CONFIG_BROKEN_ON_SMP=y
28CONFIG_INIT_ENV_ARG_LIMIT=32
29CONFIG_LOCALVERSION=""
30CONFIG_LOCALVERSION_AUTO=y
31CONFIG_SYSVIPC=y
32CONFIG_SYSVIPC_SYSCTL=y
33# CONFIG_POSIX_MQUEUE is not set
34# CONFIG_BSD_PROCESS_ACCT is not set
35# CONFIG_TASKSTATS is not set
36# CONFIG_AUDIT is not set
37
38#
39# RCU Subsystem
40#
41CONFIG_CLASSIC_RCU=y
42# CONFIG_TREE_RCU is not set
43# CONFIG_PREEMPT_RCU is not set
44# CONFIG_TREE_RCU_TRACE is not set
45# CONFIG_PREEMPT_RCU_TRACE is not set
46CONFIG_IKCONFIG=y
47CONFIG_IKCONFIG_PROC=y
48CONFIG_LOG_BUF_SHIFT=17
49# CONFIG_GROUP_SCHED is not set
50# CONFIG_CGROUPS is not set
51CONFIG_SYSFS_DEPRECATED=y
52CONFIG_SYSFS_DEPRECATED_V2=y
53# CONFIG_RELAY is not set
54# CONFIG_NAMESPACES is not set
55CONFIG_BLK_DEV_INITRD=y
56CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
57CONFIG_INITRAMFS_ROOT_UID=0
58CONFIG_INITRAMFS_ROOT_GID=0
59CONFIG_RD_GZIP=y
60# CONFIG_RD_BZIP2 is not set
61# CONFIG_RD_LZMA is not set
62CONFIG_INITRAMFS_COMPRESSION_NONE=y
63# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
64# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
65# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
66# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
67CONFIG_SYSCTL=y
68CONFIG_ANON_INODES=y
69CONFIG_EMBEDDED=y
70CONFIG_SYSCTL_SYSCALL=y
71CONFIG_KALLSYMS=y
72CONFIG_KALLSYMS_ALL=y
73CONFIG_KALLSYMS_EXTRA_PASS=y
74# CONFIG_STRIP_ASM_SYMS is not set
75# CONFIG_HOTPLUG is not set
76CONFIG_PRINTK=y
77CONFIG_BUG=y
78CONFIG_ELF_CORE=y
79# CONFIG_BASE_FULL is not set
80# CONFIG_FUTEX is not set
81# CONFIG_EPOLL is not set
82# CONFIG_SIGNALFD is not set
83CONFIG_TIMERFD=y
84CONFIG_EVENTFD=y
85# CONFIG_SHMEM is not set
86CONFIG_AIO=y
87CONFIG_VM_EVENT_COUNTERS=y
88CONFIG_COMPAT_BRK=y
89CONFIG_SLAB=y
90# CONFIG_SLUB is not set
91# CONFIG_SLOB is not set
92# CONFIG_PROFILING is not set
93# CONFIG_MARKERS is not set
94# CONFIG_SLOW_WORK is not set
95# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
96CONFIG_SLABINFO=y
97CONFIG_BASE_SMALL=1
98CONFIG_MODULES=y
99# CONFIG_MODULE_FORCE_LOAD is not set
100CONFIG_MODULE_UNLOAD=y
101# CONFIG_MODULE_FORCE_UNLOAD is not set
102# CONFIG_MODVERSIONS is not set
103# CONFIG_MODULE_SRCVERSION_ALL is not set
104CONFIG_BLOCK=y
105# CONFIG_LBD is not set
106# CONFIG_BLK_DEV_BSG is not set
107# CONFIG_BLK_DEV_INTEGRITY is not set
108
109#
110# IO Schedulers
111#
112CONFIG_IOSCHED_NOOP=y
113CONFIG_IOSCHED_AS=y
114CONFIG_IOSCHED_DEADLINE=y
115CONFIG_IOSCHED_CFQ=y
116# CONFIG_DEFAULT_AS is not set
117# CONFIG_DEFAULT_DEADLINE is not set
118CONFIG_DEFAULT_CFQ=y
119# CONFIG_DEFAULT_NOOP is not set
120CONFIG_DEFAULT_IOSCHED="cfq"
121# CONFIG_FREEZER is not set
122
123#
124# Platform options
125#
126CONFIG_PLATFORM_GENERIC=y
127CONFIG_OPT_LIB_FUNCTION=y
128CONFIG_OPT_LIB_ASM=y
129CONFIG_ALLOW_EDIT_AUTO=y
130
131#
132# Automatic platform settings from Kconfig.auto
133#
134
135#
136# Definitions for MICROBLAZE0
137#
138CONFIG_KERNEL_BASE_ADDR=0x90000000
139CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
140CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
141CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
142CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
143CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
144CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
145CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
146CONFIG_XILINX_MICROBLAZE0_HW_VER="7.10.d"
147
148#
149# Processor type and features
150#
151# CONFIG_NO_HZ is not set
152# CONFIG_HIGH_RES_TIMERS is not set
153CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
154CONFIG_PREEMPT_NONE=y
155# CONFIG_PREEMPT_VOLUNTARY is not set
156# CONFIG_PREEMPT is not set
157CONFIG_HZ_100=y
158# CONFIG_HZ_250 is not set
159# CONFIG_HZ_300 is not set
160# CONFIG_HZ_1000 is not set
161CONFIG_HZ=100
162# CONFIG_SCHED_HRTICK is not set
163CONFIG_MMU=y
164
165#
166# Boot options
167#
168CONFIG_CMDLINE_BOOL=y
169CONFIG_CMDLINE="console=ttyUL0,115200"
170CONFIG_CMDLINE_FORCE=y
171CONFIG_OF=y
172CONFIG_PROC_DEVICETREE=y
173
174#
175# Advanced setup
176#
177# CONFIG_ADVANCED_OPTIONS is not set
178
179#
180# Default settings for advanced configuration options are used
181#
182CONFIG_HIGHMEM_START=0xfe000000
183CONFIG_LOWMEM_SIZE=0x30000000
184CONFIG_KERNEL_START=0xc0000000
185CONFIG_TASK_SIZE=0x80000000
186CONFIG_SELECT_MEMORY_MODEL=y
187CONFIG_FLATMEM_MANUAL=y
188# CONFIG_DISCONTIGMEM_MANUAL is not set
189# CONFIG_SPARSEMEM_MANUAL is not set
190CONFIG_FLATMEM=y
191CONFIG_FLAT_NODE_MEM_MAP=y
192CONFIG_PAGEFLAGS_EXTENDED=y
193CONFIG_SPLIT_PTLOCK_CPUS=4
194# CONFIG_PHYS_ADDR_T_64BIT is not set
195CONFIG_ZONE_DMA_FLAG=0
196CONFIG_VIRT_TO_BUS=y
197CONFIG_UNEVICTABLE_LRU=y
198CONFIG_HAVE_MLOCK=y
199CONFIG_HAVE_MLOCKED_PAGE_BIT=y
200
201#
202# Exectuable file formats
203#
204CONFIG_BINFMT_ELF=y
205# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
206# CONFIG_HAVE_AOUT is not set
207# CONFIG_BINFMT_MISC is not set
208CONFIG_NET=y
209
210#
211# Networking options
212#
213CONFIG_PACKET=y
214# CONFIG_PACKET_MMAP is not set
215CONFIG_UNIX=y
216CONFIG_XFRM=y
217# CONFIG_XFRM_USER is not set
218# CONFIG_XFRM_SUB_POLICY is not set
219# CONFIG_XFRM_MIGRATE is not set
220# CONFIG_XFRM_STATISTICS is not set
221# CONFIG_NET_KEY is not set
222CONFIG_INET=y
223# CONFIG_IP_MULTICAST is not set
224# CONFIG_IP_ADVANCED_ROUTER is not set
225CONFIG_IP_FIB_HASH=y
226# CONFIG_IP_PNP is not set
227# CONFIG_NET_IPIP is not set
228# CONFIG_NET_IPGRE is not set
229# CONFIG_ARPD is not set
230# CONFIG_SYN_COOKIES is not set
231# CONFIG_INET_AH is not set
232# CONFIG_INET_ESP is not set
233# CONFIG_INET_IPCOMP is not set
234# CONFIG_INET_XFRM_TUNNEL is not set
235# CONFIG_INET_TUNNEL is not set
236CONFIG_INET_XFRM_MODE_TRANSPORT=y
237CONFIG_INET_XFRM_MODE_TUNNEL=y
238CONFIG_INET_XFRM_MODE_BEET=y
239# CONFIG_INET_LRO is not set
240CONFIG_INET_DIAG=y
241CONFIG_INET_TCP_DIAG=y
242# CONFIG_TCP_CONG_ADVANCED is not set
243CONFIG_TCP_CONG_CUBIC=y
244CONFIG_DEFAULT_TCP_CONG="cubic"
245# CONFIG_TCP_MD5SIG is not set
246# CONFIG_IPV6 is not set
247# CONFIG_NETWORK_SECMARK is not set
248# CONFIG_NETFILTER is not set
249# CONFIG_IP_DCCP is not set
250# CONFIG_IP_SCTP is not set
251# CONFIG_TIPC is not set
252# CONFIG_ATM is not set
253# CONFIG_BRIDGE is not set
254# CONFIG_NET_DSA is not set
255# CONFIG_VLAN_8021Q is not set
256# CONFIG_DECNET is not set
257# CONFIG_LLC2 is not set
258# CONFIG_IPX is not set
259# CONFIG_ATALK is not set
260# CONFIG_X25 is not set
261# CONFIG_LAPB is not set
262# CONFIG_ECONET is not set
263# CONFIG_WAN_ROUTER is not set
264# CONFIG_PHONET is not set
265# CONFIG_NET_SCHED is not set
266# CONFIG_DCB is not set
267
268#
269# Network testing
270#
271# CONFIG_NET_PKTGEN is not set
272# CONFIG_HAMRADIO is not set
273# CONFIG_CAN is not set
274# CONFIG_IRDA is not set
275# CONFIG_BT is not set
276# CONFIG_AF_RXRPC is not set
277# CONFIG_WIRELESS is not set
278# CONFIG_WIMAX is not set
279# CONFIG_RFKILL is not set
280# CONFIG_NET_9P is not set
281
282#
283# Device Drivers
284#
285
286#
287# Generic Driver Options
288#
289CONFIG_STANDALONE=y
290CONFIG_PREVENT_FIRMWARE_BUILD=y
291# CONFIG_DEBUG_DRIVER is not set
292# CONFIG_DEBUG_DEVRES is not set
293# CONFIG_SYS_HYPERVISOR is not set
294# CONFIG_CONNECTOR is not set
295# CONFIG_MTD is not set
296CONFIG_OF_DEVICE=y
297# CONFIG_PARPORT is not set
298CONFIG_BLK_DEV=y
299# CONFIG_BLK_DEV_COW_COMMON is not set
300# CONFIG_BLK_DEV_LOOP is not set
301# CONFIG_BLK_DEV_NBD is not set
302CONFIG_BLK_DEV_RAM=y
303CONFIG_BLK_DEV_RAM_COUNT=16
304CONFIG_BLK_DEV_RAM_SIZE=8192
305# CONFIG_BLK_DEV_XIP is not set
306# CONFIG_CDROM_PKTCDVD is not set
307# CONFIG_ATA_OVER_ETH is not set
308# CONFIG_XILINX_SYSACE is not set
309CONFIG_MISC_DEVICES=y
310# CONFIG_ENCLOSURE_SERVICES is not set
311# CONFIG_C2PORT is not set
312
313#
314# EEPROM support
315#
316# CONFIG_EEPROM_93CX6 is not set
317
318#
319# SCSI device support
320#
321# CONFIG_RAID_ATTRS is not set
322# CONFIG_SCSI is not set
323# CONFIG_SCSI_DMA is not set
324# CONFIG_SCSI_NETLINK is not set
325# CONFIG_ATA is not set
326# CONFIG_MD is not set
327CONFIG_NETDEVICES=y
328CONFIG_COMPAT_NET_DEV_OPS=y
329# CONFIG_DUMMY is not set
330# CONFIG_BONDING is not set
331# CONFIG_MACVLAN is not set
332# CONFIG_EQUALIZER is not set
333# CONFIG_TUN is not set
334# CONFIG_VETH is not set
335# CONFIG_PHYLIB is not set
336CONFIG_NET_ETHERNET=y
337# CONFIG_MII is not set
338# CONFIG_ETHOC is not set
339# CONFIG_DNET is not set
340# CONFIG_IBM_NEW_EMAC_ZMII is not set
341# CONFIG_IBM_NEW_EMAC_RGMII is not set
342# CONFIG_IBM_NEW_EMAC_TAH is not set
343# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
344# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
345# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
346# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
347# CONFIG_B44 is not set
348CONFIG_NETDEV_1000=y
349CONFIG_NETDEV_10000=y
350
351#
352# Wireless LAN
353#
354# CONFIG_WLAN_PRE80211 is not set
355# CONFIG_WLAN_80211 is not set
356
357#
358# Enable WiMAX (Networking options) to see the WiMAX drivers
359#
360# CONFIG_WAN is not set
361# CONFIG_PPP is not set
362# CONFIG_SLIP is not set
363# CONFIG_NETCONSOLE is not set
364# CONFIG_NETPOLL is not set
365# CONFIG_NET_POLL_CONTROLLER is not set
366# CONFIG_ISDN is not set
367# CONFIG_PHONE is not set
368
369#
370# Input device support
371#
372# CONFIG_INPUT is not set
373
374#
375# Hardware I/O ports
376#
377# CONFIG_SERIO is not set
378# CONFIG_GAMEPORT is not set
379
380#
381# Character devices
382#
383# CONFIG_VT is not set
384CONFIG_DEVKMEM=y
385# CONFIG_SERIAL_NONSTANDARD is not set
386
387#
388# Serial drivers
389#
390# CONFIG_SERIAL_8250 is not set
391
392#
393# Non-8250 serial port support
394#
395CONFIG_SERIAL_UARTLITE=y
396CONFIG_SERIAL_UARTLITE_CONSOLE=y
397CONFIG_SERIAL_CORE=y
398CONFIG_SERIAL_CORE_CONSOLE=y
399CONFIG_UNIX98_PTYS=y
400# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
401CONFIG_LEGACY_PTYS=y
402CONFIG_LEGACY_PTY_COUNT=256
403# CONFIG_IPMI_HANDLER is not set
404# CONFIG_HW_RANDOM is not set
405# CONFIG_RTC is not set
406# CONFIG_GEN_RTC is not set
407# CONFIG_XILINX_HWICAP is not set
408# CONFIG_R3964 is not set
409# CONFIG_RAW_DRIVER is not set
410# CONFIG_TCG_TPM is not set
411# CONFIG_I2C is not set
412# CONFIG_SPI is not set
413CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
414# CONFIG_GPIOLIB is not set
415# CONFIG_W1 is not set
416# CONFIG_POWER_SUPPLY is not set
417# CONFIG_HWMON is not set
418# CONFIG_THERMAL is not set
419# CONFIG_THERMAL_HWMON is not set
420# CONFIG_WATCHDOG is not set
421CONFIG_SSB_POSSIBLE=y
422
423#
424# Sonics Silicon Backplane
425#
426# CONFIG_SSB is not set
427
428#
429# Multifunction device drivers
430#
431# CONFIG_MFD_CORE is not set
432# CONFIG_MFD_SM501 is not set
433# CONFIG_HTC_PASIC3 is not set
434# CONFIG_MFD_TMIO is not set
435# CONFIG_REGULATOR is not set
436
437#
438# Multimedia devices
439#
440
441#
442# Multimedia core support
443#
444# CONFIG_VIDEO_DEV is not set
445# CONFIG_DVB_CORE is not set
446# CONFIG_VIDEO_MEDIA is not set
447
448#
449# Multimedia drivers
450#
451# CONFIG_DAB is not set
452
453#
454# Graphics support
455#
456# CONFIG_VGASTATE is not set
457# CONFIG_VIDEO_OUTPUT_CONTROL is not set
458# CONFIG_FB is not set
459# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
460
461#
462# Display device support
463#
464# CONFIG_DISPLAY_SUPPORT is not set
465# CONFIG_SOUND is not set
466# CONFIG_USB_SUPPORT is not set
467# CONFIG_MMC is not set
468# CONFIG_MEMSTICK is not set
469# CONFIG_NEW_LEDS is not set
470# CONFIG_ACCESSIBILITY is not set
471# CONFIG_RTC_CLASS is not set
472# CONFIG_DMADEVICES is not set
473# CONFIG_AUXDISPLAY is not set
474# CONFIG_UIO is not set
475# CONFIG_STAGING is not set
476
477#
478# File systems
479#
480CONFIG_EXT2_FS=y
481# CONFIG_EXT2_FS_XATTR is not set
482# CONFIG_EXT2_FS_XIP is not set
483# CONFIG_EXT3_FS is not set
484# CONFIG_EXT4_FS is not set
485# CONFIG_REISERFS_FS is not set
486# CONFIG_JFS_FS is not set
487# CONFIG_FS_POSIX_ACL is not set
488CONFIG_FILE_LOCKING=y
489# CONFIG_XFS_FS is not set
490# CONFIG_OCFS2_FS is not set
491# CONFIG_BTRFS_FS is not set
492# CONFIG_DNOTIFY is not set
493# CONFIG_INOTIFY is not set
494# CONFIG_QUOTA is not set
495# CONFIG_AUTOFS_FS is not set
496# CONFIG_AUTOFS4_FS is not set
497# CONFIG_FUSE_FS is not set
498
499#
500# Caches
501#
502# CONFIG_FSCACHE is not set
503
504#
505# CD-ROM/DVD Filesystems
506#
507# CONFIG_ISO9660_FS is not set
508# CONFIG_UDF_FS is not set
509
510#
511# DOS/FAT/NT Filesystems
512#
513# CONFIG_MSDOS_FS is not set
514# CONFIG_VFAT_FS is not set
515# CONFIG_NTFS_FS is not set
516
517#
518# Pseudo filesystems
519#
520CONFIG_PROC_FS=y
521# CONFIG_PROC_KCORE is not set
522CONFIG_PROC_SYSCTL=y
523CONFIG_PROC_PAGE_MONITOR=y
524CONFIG_SYSFS=y
525CONFIG_TMPFS=y
526# CONFIG_TMPFS_POSIX_ACL is not set
527# CONFIG_HUGETLB_PAGE is not set
528# CONFIG_CONFIGFS_FS is not set
529CONFIG_MISC_FILESYSTEMS=y
530# CONFIG_ADFS_FS is not set
531# CONFIG_AFFS_FS is not set
532# CONFIG_HFS_FS is not set
533# CONFIG_HFSPLUS_FS is not set
534# CONFIG_BEFS_FS is not set
535# CONFIG_BFS_FS is not set
536# CONFIG_EFS_FS is not set
537# CONFIG_CRAMFS is not set
538# CONFIG_SQUASHFS is not set
539# CONFIG_VXFS_FS is not set
540# CONFIG_MINIX_FS is not set
541# CONFIG_OMFS_FS is not set
542# CONFIG_HPFS_FS is not set
543# CONFIG_QNX4FS_FS is not set
544# CONFIG_ROMFS_FS is not set
545# CONFIG_SYSV_FS is not set
546# CONFIG_UFS_FS is not set
547# CONFIG_NILFS2_FS is not set
548CONFIG_NETWORK_FILESYSTEMS=y
549CONFIG_NFS_FS=y
550CONFIG_NFS_V3=y
551# CONFIG_NFS_V3_ACL is not set
552# CONFIG_NFS_V4 is not set
553# CONFIG_NFSD is not set
554CONFIG_LOCKD=y
555CONFIG_LOCKD_V4=y
556CONFIG_NFS_COMMON=y
557CONFIG_SUNRPC=y
558# CONFIG_RPCSEC_GSS_KRB5 is not set
559# CONFIG_RPCSEC_GSS_SPKM3 is not set
560# CONFIG_SMB_FS is not set
561CONFIG_CIFS=y
562CONFIG_CIFS_STATS=y
563CONFIG_CIFS_STATS2=y
564# CONFIG_CIFS_WEAK_PW_HASH is not set
565# CONFIG_CIFS_XATTR is not set
566# CONFIG_CIFS_DEBUG2 is not set
567# CONFIG_CIFS_EXPERIMENTAL is not set
568# CONFIG_NCP_FS is not set
569# CONFIG_CODA_FS is not set
570# CONFIG_AFS_FS is not set
571
572#
573# Partition Types
574#
575CONFIG_PARTITION_ADVANCED=y
576# CONFIG_ACORN_PARTITION is not set
577# CONFIG_OSF_PARTITION is not set
578# CONFIG_AMIGA_PARTITION is not set
579# CONFIG_ATARI_PARTITION is not set
580# CONFIG_MAC_PARTITION is not set
581CONFIG_MSDOS_PARTITION=y
582# CONFIG_BSD_DISKLABEL is not set
583# CONFIG_MINIX_SUBPARTITION is not set
584# CONFIG_SOLARIS_X86_PARTITION is not set
585# CONFIG_UNIXWARE_DISKLABEL is not set
586# CONFIG_LDM_PARTITION is not set
587# CONFIG_SGI_PARTITION is not set
588# CONFIG_ULTRIX_PARTITION is not set
589# CONFIG_SUN_PARTITION is not set
590# CONFIG_KARMA_PARTITION is not set
591# CONFIG_EFI_PARTITION is not set
592# CONFIG_SYSV68_PARTITION is not set
593CONFIG_NLS=y
594CONFIG_NLS_DEFAULT="iso8859-1"
595# CONFIG_NLS_CODEPAGE_437 is not set
596# CONFIG_NLS_CODEPAGE_737 is not set
597# CONFIG_NLS_CODEPAGE_775 is not set
598# CONFIG_NLS_CODEPAGE_850 is not set
599# CONFIG_NLS_CODEPAGE_852 is not set
600# CONFIG_NLS_CODEPAGE_855 is not set
601# CONFIG_NLS_CODEPAGE_857 is not set
602# CONFIG_NLS_CODEPAGE_860 is not set
603# CONFIG_NLS_CODEPAGE_861 is not set
604# CONFIG_NLS_CODEPAGE_862 is not set
605# CONFIG_NLS_CODEPAGE_863 is not set
606# CONFIG_NLS_CODEPAGE_864 is not set
607# CONFIG_NLS_CODEPAGE_865 is not set
608# CONFIG_NLS_CODEPAGE_866 is not set
609# CONFIG_NLS_CODEPAGE_869 is not set
610# CONFIG_NLS_CODEPAGE_936 is not set
611# CONFIG_NLS_CODEPAGE_950 is not set
612# CONFIG_NLS_CODEPAGE_932 is not set
613# CONFIG_NLS_CODEPAGE_949 is not set
614# CONFIG_NLS_CODEPAGE_874 is not set
615# CONFIG_NLS_ISO8859_8 is not set
616# CONFIG_NLS_CODEPAGE_1250 is not set
617# CONFIG_NLS_CODEPAGE_1251 is not set
618# CONFIG_NLS_ASCII is not set
619# CONFIG_NLS_ISO8859_1 is not set
620# CONFIG_NLS_ISO8859_2 is not set
621# CONFIG_NLS_ISO8859_3 is not set
622# CONFIG_NLS_ISO8859_4 is not set
623# CONFIG_NLS_ISO8859_5 is not set
624# CONFIG_NLS_ISO8859_6 is not set
625# CONFIG_NLS_ISO8859_7 is not set
626# CONFIG_NLS_ISO8859_9 is not set
627# CONFIG_NLS_ISO8859_13 is not set
628# CONFIG_NLS_ISO8859_14 is not set
629# CONFIG_NLS_ISO8859_15 is not set
630# CONFIG_NLS_KOI8_R is not set
631# CONFIG_NLS_KOI8_U is not set
632# CONFIG_NLS_UTF8 is not set
633# CONFIG_DLM is not set
634
635#
636# Kernel hacking
637#
638# CONFIG_PRINTK_TIME is not set
639CONFIG_ENABLE_WARN_DEPRECATED=y
640CONFIG_ENABLE_MUST_CHECK=y
641CONFIG_FRAME_WARN=1024
642# CONFIG_MAGIC_SYSRQ is not set
643# CONFIG_UNUSED_SYMBOLS is not set
644# CONFIG_DEBUG_FS is not set
645# CONFIG_HEADERS_CHECK is not set
646CONFIG_DEBUG_KERNEL=y
647# CONFIG_DEBUG_SHIRQ is not set
648CONFIG_DETECT_SOFTLOCKUP=y
649# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
650CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
651CONFIG_DETECT_HUNG_TASK=y
652# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
653CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
654CONFIG_SCHED_DEBUG=y
655# CONFIG_SCHEDSTATS is not set
656# CONFIG_TIMER_STATS is not set
657# CONFIG_DEBUG_OBJECTS is not set
658CONFIG_DEBUG_SLAB=y
659# CONFIG_DEBUG_SLAB_LEAK is not set
660CONFIG_DEBUG_SPINLOCK=y
661# CONFIG_DEBUG_MUTEXES is not set
662# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
663# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
664# CONFIG_DEBUG_KOBJECT is not set
665CONFIG_DEBUG_INFO=y
666# CONFIG_DEBUG_VM is not set
667# CONFIG_DEBUG_WRITECOUNT is not set
668# CONFIG_DEBUG_MEMORY_INIT is not set
669# CONFIG_DEBUG_LIST is not set
670# CONFIG_DEBUG_SG is not set
671# CONFIG_DEBUG_NOTIFIERS is not set
672# CONFIG_BOOT_PRINTK_DELAY is not set
673# CONFIG_RCU_TORTURE_TEST is not set
674# CONFIG_RCU_CPU_STALL_DETECTOR is not set
675# CONFIG_BACKTRACE_SELF_TEST is not set
676# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
677# CONFIG_FAULT_INJECTION is not set
678# CONFIG_SYSCTL_SYSCALL_CHECK is not set
679# CONFIG_PAGE_POISONING is not set
680# CONFIG_SAMPLES is not set
681CONFIG_EARLY_PRINTK=y
682CONFIG_HEART_BEAT=y
683CONFIG_DEBUG_BOOTMEM=y
684
685#
686# Security options
687#
688# CONFIG_KEYS is not set
689# CONFIG_SECURITY is not set
690# CONFIG_SECURITYFS is not set
691# CONFIG_SECURITY_FILE_CAPABILITIES is not set
692CONFIG_CRYPTO=y
693
694#
695# Crypto core or helper
696#
697# CONFIG_CRYPTO_FIPS is not set
698# CONFIG_CRYPTO_MANAGER is not set
699# CONFIG_CRYPTO_MANAGER2 is not set
700# CONFIG_CRYPTO_GF128MUL is not set
701# CONFIG_CRYPTO_NULL is not set
702# CONFIG_CRYPTO_CRYPTD is not set
703# CONFIG_CRYPTO_AUTHENC is not set
704# CONFIG_CRYPTO_TEST is not set
705
706#
707# Authenticated Encryption with Associated Data
708#
709# CONFIG_CRYPTO_CCM is not set
710# CONFIG_CRYPTO_GCM is not set
711# CONFIG_CRYPTO_SEQIV is not set
712
713#
714# Block modes
715#
716# CONFIG_CRYPTO_CBC is not set
717# CONFIG_CRYPTO_CTR is not set
718# CONFIG_CRYPTO_CTS is not set
719# CONFIG_CRYPTO_ECB is not set
720# CONFIG_CRYPTO_LRW is not set
721# CONFIG_CRYPTO_PCBC is not set
722# CONFIG_CRYPTO_XTS is not set
723
724#
725# Hash modes
726#
727# CONFIG_CRYPTO_HMAC is not set
728# CONFIG_CRYPTO_XCBC is not set
729
730#
731# Digest
732#
733# CONFIG_CRYPTO_CRC32C is not set
734# CONFIG_CRYPTO_MD4 is not set
735# CONFIG_CRYPTO_MD5 is not set
736# CONFIG_CRYPTO_MICHAEL_MIC is not set
737# CONFIG_CRYPTO_RMD128 is not set
738# CONFIG_CRYPTO_RMD160 is not set
739# CONFIG_CRYPTO_RMD256 is not set
740# CONFIG_CRYPTO_RMD320 is not set
741# CONFIG_CRYPTO_SHA1 is not set
742# CONFIG_CRYPTO_SHA256 is not set
743# CONFIG_CRYPTO_SHA512 is not set
744# CONFIG_CRYPTO_TGR192 is not set
745# CONFIG_CRYPTO_WP512 is not set
746
747#
748# Ciphers
749#
750# CONFIG_CRYPTO_AES is not set
751# CONFIG_CRYPTO_ANUBIS is not set
752# CONFIG_CRYPTO_ARC4 is not set
753# CONFIG_CRYPTO_BLOWFISH is not set
754# CONFIG_CRYPTO_CAMELLIA is not set
755# CONFIG_CRYPTO_CAST5 is not set
756# CONFIG_CRYPTO_CAST6 is not set
757# CONFIG_CRYPTO_DES is not set
758# CONFIG_CRYPTO_FCRYPT is not set
759# CONFIG_CRYPTO_KHAZAD is not set
760# CONFIG_CRYPTO_SALSA20 is not set
761# CONFIG_CRYPTO_SEED is not set
762# CONFIG_CRYPTO_SERPENT is not set
763# CONFIG_CRYPTO_TEA is not set
764# CONFIG_CRYPTO_TWOFISH is not set
765
766#
767# Compression
768#
769# CONFIG_CRYPTO_DEFLATE is not set
770# CONFIG_CRYPTO_ZLIB is not set
771# CONFIG_CRYPTO_LZO is not set
772
773#
774# Random Number Generation
775#
776# CONFIG_CRYPTO_ANSI_CPRNG is not set
777CONFIG_CRYPTO_HW=y
778# CONFIG_BINARY_PRINTF is not set
779
780#
781# Library routines
782#
783CONFIG_BITREVERSE=y
784CONFIG_GENERIC_FIND_LAST_BIT=y
785# CONFIG_CRC_CCITT is not set
786# CONFIG_CRC16 is not set
787# CONFIG_CRC_T10DIF is not set
788# CONFIG_CRC_ITU_T is not set
789CONFIG_CRC32=y
790# CONFIG_CRC7 is not set
791# CONFIG_LIBCRC32C is not set
792CONFIG_ZLIB_INFLATE=y
793CONFIG_DECOMPRESS_GZIP=y
794CONFIG_HAS_IOMEM=y
795CONFIG_HAS_IOPORT=y
796CONFIG_HAS_DMA=y
797CONFIG_HAVE_LMB=y
798CONFIG_NLATTR=y
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 31820dfef56b..db5294c30caf 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,26 +1,3 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3header-y += auxvec.h 3header-y += elf.h
4header-y += errno.h
5header-y += fcntl.h
6header-y += ioctl.h
7header-y += ioctls.h
8header-y += ipcbuf.h
9header-y += linkage.h
10header-y += msgbuf.h
11header-y += poll.h
12header-y += resource.h
13header-y += sembuf.h
14header-y += shmbuf.h
15header-y += sigcontext.h
16header-y += siginfo.h
17header-y += socket.h
18header-y += sockios.h
19header-y += statfs.h
20header-y += stat.h
21header-y += termbits.h
22header-y += ucontext.h
23
24unifdef-y += cputable.h
25unifdef-y += elf.h
26unifdef-y += termios.h
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 3300b785049b..f989d6aad648 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007 PetaLogix 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
3 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 4 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
4 * based on v850 version which was 5 * based on v850 version which was
5 * Copyright (C) 2001,02,03 NEC Electronics Corporation 6 * Copyright (C) 2001,02,03 NEC Electronics Corporation
@@ -43,6 +44,23 @@
43#define flush_icache_range(start, len) __invalidate_icache_range(start, len) 44#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
44#define flush_icache_page(vma, pg) do { } while (0) 45#define flush_icache_page(vma, pg) do { } while (0)
45 46
47#ifndef CONFIG_MMU
48# define flush_icache_user_range(start, len) do { } while (0)
49#else
50# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
51
52# define flush_page_to_ram(page) do { } while (0)
53
54# define flush_icache() __invalidate_icache_all()
55# define flush_cache_sigtramp(vaddr) \
56 __invalidate_icache_range(vaddr, vaddr + 8)
57
58# define flush_dcache_mmap_lock(mapping) do { } while (0)
59# define flush_dcache_mmap_unlock(mapping) do { } while (0)
60
61# define flush_cache_dup_mm(mm) do { } while (0)
62#endif
63
46#define flush_cache_vmap(start, end) do { } while (0) 64#define flush_cache_vmap(start, end) do { } while (0)
47#define flush_cache_vunmap(start, end) do { } while (0) 65#define flush_cache_vunmap(start, end) do { } while (0)
48 66
diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h
index 92b30762ce59..97ea46b5cf80 100644
--- a/arch/microblaze/include/asm/checksum.h
+++ b/arch/microblaze/include/asm/checksum.h
@@ -51,7 +51,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
51 * here even more important to align src and dst on a 32-bit (or even 51 * here even more important to align src and dst on a 32-bit (or even
52 * better 64-bit) boundary 52 * better 64-bit) boundary
53 */ 53 */
54extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum); 54extern __wsum csum_partial_copy(const void *src, void *dst, int len,
55 __wsum sum);
55 56
56/* 57/*
57 * the same as csum_partial_copy, but copies from user space. 58 * the same as csum_partial_copy, but copies from user space.
@@ -59,8 +60,8 @@ extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
59 * here even more important to align src and dst on a 32-bit (or even 60 * here even more important to align src and dst on a 32-bit (or even
60 * better 64-bit) boundary 61 * better 64-bit) boundary
61 */ 62 */
62extern __wsum csum_partial_copy_from_user(const char *src, char *dst, 63extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
63 int len, int sum, int *csum_err); 64 int len, __wsum sum, int *csum_err);
64 65
65#define csum_partial_copy_nocheck(src, dst, len, sum) \ 66#define csum_partial_copy_nocheck(src, dst, len, sum) \
66 csum_partial_copy((src), (dst), (len), (sum)) 67 csum_partial_copy((src), (dst), (len), (sum))
@@ -75,11 +76,12 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
75/* 76/*
76 * Fold a partial checksum 77 * Fold a partial checksum
77 */ 78 */
78static inline __sum16 csum_fold(unsigned int sum) 79static inline __sum16 csum_fold(__wsum csum)
79{ 80{
81 u32 sum = (__force u32)csum;
80 sum = (sum & 0xffff) + (sum >> 16); 82 sum = (sum & 0xffff) + (sum >> 16);
81 sum = (sum & 0xffff) + (sum >> 16); 83 sum = (sum & 0xffff) + (sum >> 16);
82 return ~sum; 84 return (__force __sum16)~sum;
83} 85}
84 86
85static inline __sum16 87static inline __sum16
@@ -93,6 +95,6 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
93 * this routine is used for miscellaneous IP-like checksums, mainly 95 * this routine is used for miscellaneous IP-like checksums, mainly
94 * in icmp.c 96 * in icmp.c
95 */ 97 */
96extern __sum16 ip_compute_csum(const unsigned char *buff, int len); 98extern __sum16 ip_compute_csum(const void *buff, int len);
97 99
98#endif /* _ASM_MICROBLAZE_CHECKSUM_H */ 100#endif /* _ASM_MICROBLAZE_CHECKSUM_H */
diff --git a/arch/microblaze/include/asm/current.h b/arch/microblaze/include/asm/current.h
index 8375ea991e26..29303ed825cc 100644
--- a/arch/microblaze/include/asm/current.h
+++ b/arch/microblaze/include/asm/current.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,12 @@
9#ifndef _ASM_MICROBLAZE_CURRENT_H 11#ifndef _ASM_MICROBLAZE_CURRENT_H
10#define _ASM_MICROBLAZE_CURRENT_H 12#define _ASM_MICROBLAZE_CURRENT_H
11 13
14/*
15 * Register used to hold the current task pointer while in the kernel.
16 * Any `call clobbered' register without a special meaning should be OK,
17 * but check asm/microblaze/kernel/entry.S to be sure.
18 */
19#define CURRENT_TASK r31
12# ifndef __ASSEMBLY__ 20# ifndef __ASSEMBLY__
13/* 21/*
14 * Dedicate r31 to keeping the current task pointer 22 * Dedicate r31 to keeping the current task pointer
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 17336252a9b8..d00e40099165 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1,129 +1 @@
1/* #include <asm-generic/dma-mapping-broken.h>
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
10#define _ASM_MICROBLAZE_DMA_MAPPING_H
11
12#include <asm/cacheflush.h>
13#include <linux/io.h>
14#include <linux/bug.h>
15
16struct scatterlist;
17
18#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
19#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
20
21/* FIXME */
22static inline int
23dma_supported(struct device *dev, u64 mask)
24{
25 return 1;
26}
27
28static inline dma_addr_t
29dma_map_page(struct device *dev, struct page *page,
30 unsigned long offset, size_t size,
31 enum dma_data_direction direction)
32{
33 BUG();
34 return 0;
35}
36
37static inline void
38dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
39 enum dma_data_direction direction)
40{
41 BUG();
42}
43
44static inline int
45dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
46 enum dma_data_direction direction)
47{
48 BUG();
49 return 0;
50}
51
52static inline void
53dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
54 enum dma_data_direction direction)
55{
56 BUG();
57}
58
59static inline void
60dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
61 enum dma_data_direction direction)
62{
63 BUG();
64}
65
66static inline void
67dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
68 size_t size, enum dma_data_direction direction)
69{
70 BUG();
71}
72
73static inline void
74dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
75 enum dma_data_direction direction)
76{
77 BUG();
78}
79
80static inline void
81dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
82 enum dma_data_direction direction)
83{
84 BUG();
85}
86
87static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
88{
89 return 0;
90}
91
92static inline void *dma_alloc_coherent(struct device *dev, size_t size,
93 dma_addr_t *dma_handle, int flag)
94{
95 return NULL; /* consistent_alloc(flag, size, dma_handle); */
96}
97
98static inline void dma_free_coherent(struct device *dev, size_t size,
99 void *vaddr, dma_addr_t dma_handle)
100{
101 BUG();
102}
103
104static inline dma_addr_t
105dma_map_single(struct device *dev, void *ptr, size_t size,
106 enum dma_data_direction direction)
107{
108 BUG_ON(direction == DMA_NONE);
109
110 return virt_to_bus(ptr);
111}
112
113static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
114 size_t size,
115 enum dma_data_direction direction)
116{
117 switch (direction) {
118 case DMA_FROM_DEVICE:
119 flush_dcache_range((unsigned)dma_addr,
120 (unsigned)dma_addr + size);
121 /* Fall through */
122 case DMA_TO_DEVICE:
123 break;
124 default:
125 BUG();
126 }
127}
128
129#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index 0967fa04fc5e..08c073badf19 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -9,8 +9,13 @@
9#ifndef _ASM_MICROBLAZE_DMA_H 9#ifndef _ASM_MICROBLAZE_DMA_H
10#define _ASM_MICROBLAZE_DMA_H 10#define _ASM_MICROBLAZE_DMA_H
11 11
12#ifndef CONFIG_MMU
12/* we don't have dma address limit. define it as zero to be 13/* we don't have dma address limit. define it as zero to be
13 * unlimited. */ 14 * unlimited. */
14#define MAX_DMA_ADDRESS (0) 15#define MAX_DMA_ADDRESS (0)
16#else
17/* Virtual address corresponding to last available physical memory address. */
18#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
19#endif
15 20
16#endif /* _ASM_MICROBLAZE_DMA_H */ 21#endif /* _ASM_MICROBLAZE_DMA_H */
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 81337f241347..f92fc0dda006 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -27,4 +29,95 @@
27 */ 29 */
28#define ELF_CLASS ELFCLASS32 30#define ELF_CLASS ELFCLASS32
29 31
32#ifndef __uClinux__
33
34/*
35 * ELF register definitions..
36 */
37
38#include <asm/ptrace.h>
39#include <asm/byteorder.h>
40
41#ifndef ELF_GREG_T
42#define ELF_GREG_T
43typedef unsigned long elf_greg_t;
44#endif
45
46#ifndef ELF_NGREG
47#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
48#endif
49
50#ifndef ELF_GREGSET_T
51#define ELF_GREGSET_T
52typedef elf_greg_t elf_gregset_t[ELF_NGREG];
53#endif
54
55#ifndef ELF_FPREGSET_T
56#define ELF_FPREGSET_T
57
58/* TBD */
59#define ELF_NFPREG 33 /* includes fsr */
60typedef unsigned long elf_fpreg_t;
61typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
62
63/* typedef struct user_fpu_struct elf_fpregset_t; */
64#endif
65
66/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
67 * use of this is to invoke "./ld.so someprog" to test out a new version of
68 * the loader. We need to make sure that it is out of the way of the program
69 * that it will "exec", and that there is sufficient room for the brk.
70 */
71
72#define ELF_ET_DYN_BASE (0x08000000)
73
74#ifdef __LITTLE_ENDIAN__
75#define ELF_DATA ELFDATA2LSB
76#else
77#define ELF_DATA ELFDATA2MSB
78#endif
79
80#define USE_ELF_CORE_DUMP
81#define ELF_EXEC_PAGESIZE 4096
82
83
84#define ELF_CORE_COPY_REGS(_dest, _regs) \
85 memcpy((char *) &_dest, (char *) _regs, \
86 sizeof(struct pt_regs));
87
88/* This yields a mask that user programs can use to figure out what
89 * instruction set this CPU supports. This could be done in user space,
90 * but it's not easy, and we've already done it here.
91 */
92#define ELF_HWCAP (0)
93
94/* This yields a string that ld.so will use to load implementation
95 * specific libraries for optimization. This is more specific in
96 * intent than poking at uname or /proc/cpuinfo.
97
98 * For the moment, we have only optimizations for the Intel generations,
99 * but that could change...
100 */
101#define ELF_PLATFORM (NULL)
102
103/* Added _f parameter. Is this definition correct: TBD */
104#define ELF_PLAT_INIT(_r, _f) \
105do { \
106 _r->r1 = _r->r1 = _r->r2 = _r->r3 = \
107 _r->r4 = _r->r5 = _r->r6 = _r->r7 = \
108 _r->r8 = _r->r9 = _r->r10 = _r->r11 = \
109 _r->r12 = _r->r13 = _r->r14 = _r->r15 = \
110 _r->r16 = _r->r17 = _r->r18 = _r->r19 = \
111 _r->r20 = _r->r21 = _r->r22 = _r->r23 = \
112 _r->r24 = _r->r25 = _r->r26 = _r->r27 = \
113 _r->r28 = _r->r29 = _r->r30 = _r->r31 = \
114 0; \
115} while (0)
116
117#ifdef __KERNEL__
118#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
119#endif
120
121#endif /* __uClinux__ */
122
30#endif /* _ASM_MICROBLAZE_ELF_H */ 123#endif /* _ASM_MICROBLAZE_ELF_H */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index e4c3aef884df..61abbd232640 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Definitions used by low-level trap handlers 2 * Definitions used by low-level trap handlers
3 * 3 *
4 * Copyright (C) 2008 Michal Simek 4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007 - 2008 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
31DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ 31DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
32# endif /* __ASSEMBLY__ */ 32# endif /* __ASSEMBLY__ */
33 33
34#ifndef CONFIG_MMU
35
34/* noMMU hasn't any space for args */ 36/* noMMU hasn't any space for args */
35# define STATE_SAVE_ARG_SPACE (0) 37# define STATE_SAVE_ARG_SPACE (0)
36 38
39#else /* CONFIG_MMU */
40
41/* If true, system calls save and restore all registers (except result
42 * registers, of course). If false, then `call clobbered' registers
43 * will not be preserved, on the theory that system calls are basically
44 * function calls anyway, and the caller should be able to deal with it.
45 * This is a security risk, of course, as `internal' values may leak out
46 * after a system call, but that certainly doesn't matter very much for
47 * a processor with no MMU protection! For a protected-mode kernel, it
48 * would be faster to just zero those registers before returning.
49 *
50 * I can not rely on the glibc implementation. If you turn it off make
51 * sure that r11/r12 is saved in user-space. --KAA
52 *
53 * These are special variables using by the kernel trap/interrupt code
54 * to save registers in, at a time when there are no spare registers we
55 * can use to do so, and we can't depend on the value of the stack
56 * pointer. This means that they must be within a signed 16-bit
57 * displacement of 0x00000000.
58 */
59
60/* A `state save frame' is a struct pt_regs preceded by some extra space
61 * suitable for a function call stack frame. */
62
63/* Amount of room on the stack reserved for arguments and to satisfy the
64 * C calling conventions, in addition to the space used by the struct
65 * pt_regs that actually holds saved values. */
66#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */
67
68#endif /* CONFIG_MMU */
69
37#endif /* _ASM_MICROBLAZE_ENTRY_H */ 70#endif /* _ASM_MICROBLAZE_ENTRY_H */
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index 24ca540e77c0..90731df9e574 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Preliminary support for HW exception handing for Microblaze 2 * Preliminary support for HW exception handing for Microblaze
3 * 3 *
4 * Copyright (C) 2008 Michal Simek 4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008 PetaLogix 5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> 6 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
@@ -64,21 +64,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
64void die(const char *str, struct pt_regs *fp, long err); 64void die(const char *str, struct pt_regs *fp, long err);
65void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); 65void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
66 66
67#if defined(CONFIG_XMON) 67#ifdef CONFIG_MMU
68extern void xmon(struct pt_regs *regs); 68void __bug(const char *file, int line, void *data);
69extern int xmon_bpt(struct pt_regs *regs); 69int bad_trap(int trap_num, struct pt_regs *regs);
70extern int xmon_sstep(struct pt_regs *regs); 70int debug_trap(struct pt_regs *regs);
71extern int xmon_iabr_match(struct pt_regs *regs); 71#endif /* CONFIG_MMU */
72extern int xmon_dabr_match(struct pt_regs *regs);
73extern void (*xmon_fault_handler)(struct pt_regs *regs);
74 72
75void (*debugger)(struct pt_regs *regs) = xmon; 73#if defined(CONFIG_KGDB)
76int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
77int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
78int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
79int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
80void (*debugger_fault_handler)(struct pt_regs *regs);
81#elif defined(CONFIG_KGDB)
82void (*debugger)(struct pt_regs *regs); 74void (*debugger)(struct pt_regs *regs);
83int (*debugger_bpt)(struct pt_regs *regs); 75int (*debugger_bpt)(struct pt_regs *regs);
84int (*debugger_sstep)(struct pt_regs *regs); 76int (*debugger_sstep)(struct pt_regs *regs);
diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h
index acf0da543ef1..6847c1512c7b 100644
--- a/arch/microblaze/include/asm/flat.h
+++ b/arch/microblaze/include/asm/flat.h
@@ -13,7 +13,6 @@
13 13
14#include <asm/unaligned.h> 14#include <asm/unaligned.h>
15 15
16#define flat_stack_align(sp) /* nothing needed */
17#define flat_argvp_envp_on_stack() 0 16#define flat_argvp_envp_on_stack() 0
18#define flat_old_ram_flag(flags) (flags) 17#define flat_old_ram_flag(flags) (flags)
19#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 18#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/microblaze/include/asm/gpio.h b/arch/microblaze/include/asm/gpio.h
index ea04632399d8..2345ac354d9b 100644
--- a/arch/microblaze/include/asm/gpio.h
+++ b/arch/microblaze/include/asm/gpio.h
@@ -11,8 +11,8 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __ASM_POWERPC_GPIO_H 14#ifndef _ASM_MICROBLAZE_GPIO_H
15#define __ASM_POWERPC_GPIO_H 15#define _ASM_MICROBLAZE_GPIO_H
16 16
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <asm-generic/gpio.h> 18#include <asm-generic/gpio.h>
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
53 53
54#endif /* CONFIG_GPIOLIB */ 54#endif /* CONFIG_GPIOLIB */
55 55
56#endif /* __ASM_POWERPC_GPIO_H */ 56#endif /* _ASM_MICROBLAZE_GPIO_H */
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 8b5853ee6b5c..5c173424d074 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +14,9 @@
12#include <asm/byteorder.h> 14#include <asm/byteorder.h>
13#include <asm/page.h> 15#include <asm/page.h>
14#include <linux/types.h> 16#include <linux/types.h>
17#include <asm/byteorder.h>
18#include <linux/mm.h> /* Get struct page {...} */
19
15 20
16#define IO_SPACE_LIMIT (0xFFFFFFFF) 21#define IO_SPACE_LIMIT (0xFFFFFFFF)
17 22
@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
112#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) 117#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
113#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) 118#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
114 119
120#ifdef CONFIG_MMU
121
122#define mm_ptov(addr) ((void *)__phys_to_virt(addr))
123#define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr))
124#define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
125#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
126#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
127
128#define __page_address(page) \
129 (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
130#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
131#define page_to_bus(page) (page_to_phys(page))
132#define bus_to_virt(addr) (phys_to_virt(addr))
133
134extern void iounmap(void *addr);
135/*extern void *__ioremap(phys_addr_t address, unsigned long size,
136 unsigned long flags);*/
137extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
138#define ioremap_writethrough(addr, size) ioremap((addr), (size))
139#define ioremap_nocache(addr, size) ioremap((addr), (size))
140#define ioremap_fullcache(addr, size) ioremap((addr), (size))
141
142#else /* CONFIG_MMU */
143
115/** 144/**
116 * virt_to_phys - map virtual addresses to physical 145 * virt_to_phys - map virtual addresses to physical
117 * @address: address to remap 146 * @address: address to remap
@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
160#define iounmap(addr) ((void)0) 189#define iounmap(addr) ((void)0)
161#define ioremap_nocache(physaddr, size) ioremap(physaddr, size) 190#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
162 191
192#endif /* CONFIG_MMU */
193
163/* 194/*
164 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 195 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
165 * access 196 * access
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 0e0431d61635..66cad6a99d77 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,11 +11,109 @@
9#ifndef _ASM_MICROBLAZE_MMU_H 11#ifndef _ASM_MICROBLAZE_MMU_H
10#define _ASM_MICROBLAZE_MMU_H 12#define _ASM_MICROBLAZE_MMU_H
11 13
12#ifndef __ASSEMBLY__ 14# ifndef CONFIG_MMU
15# ifndef __ASSEMBLY__
13typedef struct { 16typedef struct {
14 struct vm_list_struct *vmlist; 17 struct vm_list_struct *vmlist;
15 unsigned long end_brk; 18 unsigned long end_brk;
16} mm_context_t; 19} mm_context_t;
17#endif /* __ASSEMBLY__ */ 20# endif /* __ASSEMBLY__ */
21# else /* CONFIG_MMU */
22# ifdef __KERNEL__
23# ifndef __ASSEMBLY__
18 24
25/* Default "unsigned long" context */
26typedef unsigned long mm_context_t;
27
28/* Hardware Page Table Entry */
29typedef struct _PTE {
30 unsigned long v:1; /* Entry is valid */
31 unsigned long vsid:24; /* Virtual segment identifier */
32 unsigned long h:1; /* Hash algorithm indicator */
33 unsigned long api:6; /* Abbreviated page index */
34 unsigned long rpn:20; /* Real (physical) page number */
35 unsigned long :3; /* Unused */
36 unsigned long r:1; /* Referenced */
37 unsigned long c:1; /* Changed */
38 unsigned long w:1; /* Write-thru cache mode */
39 unsigned long i:1; /* Cache inhibited */
40 unsigned long m:1; /* Memory coherence */
41 unsigned long g:1; /* Guarded */
42 unsigned long :1; /* Unused */
43 unsigned long pp:2; /* Page protection */
44} PTE;
45
46/* Values for PP (assumes Ks=0, Kp=1) */
47# define PP_RWXX 0 /* Supervisor read/write, User none */
48# define PP_RWRX 1 /* Supervisor read/write, User read */
49# define PP_RWRW 2 /* Supervisor read/write, User read/write */
50# define PP_RXRX 3 /* Supervisor read, User read */
51
52/* Segment Register */
53typedef struct _SEGREG {
54 unsigned long t:1; /* Normal or I/O type */
55 unsigned long ks:1; /* Supervisor 'key' (normally 0) */
56 unsigned long kp:1; /* User 'key' (normally 1) */
57 unsigned long n:1; /* No-execute */
58 unsigned long :4; /* Unused */
59 unsigned long vsid:24; /* Virtual Segment Identifier */
60} SEGREG;
61
62extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
63extern void _tlbia(void); /* invalidate all TLB entries */
64# endif /* __ASSEMBLY__ */
65
66/*
67 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
68 * instruction and data sides share a unified, 64-entry, semi-associative
69 * TLB which is maintained totally under software control. In addition, the
70 * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
71 * TLB which serves as a first level to the shared TLB. These two TLBs are
72 * known as the UTLB and ITLB, respectively.
73 */
74
75# define MICROBLAZE_TLB_SIZE 64
76
77/*
78 * TLB entries are defined by a "high" tag portion and a "low" data
79 * portion. The data portion is 32-bits.
80 *
81 * TLB entries are managed entirely under software control by reading,
82 * writing, and searching using the MTS and MFS instructions.
83 */
84
85# define TLB_LO 1
86# define TLB_HI 0
87# define TLB_DATA TLB_LO
88# define TLB_TAG TLB_HI
89
90/* Tag portion */
91# define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
92# define TLB_PAGESZ_MASK 0x00000380
93# define TLB_PAGESZ(x) (((x) & 0x7) << 7)
94# define PAGESZ_1K 0
95# define PAGESZ_4K 1
96# define PAGESZ_16K 2
97# define PAGESZ_64K 3
98# define PAGESZ_256K 4
99# define PAGESZ_1M 5
100# define PAGESZ_4M 6
101# define PAGESZ_16M 7
102# define TLB_VALID 0x00000040 /* Entry is valid */
103
104/* Data portion */
105# define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
106# define TLB_PERM_MASK 0x00000300
107# define TLB_EX 0x00000200 /* Instruction execution allowed */
108# define TLB_WR 0x00000100 /* Writes permitted */
109# define TLB_ZSEL_MASK 0x000000F0
110# define TLB_ZSEL(x) (((x) & 0xF) << 4)
111# define TLB_ATTR_MASK 0x0000000F
112# define TLB_W 0x00000008 /* Caching is write-through */
113# define TLB_I 0x00000004 /* Caching is inhibited */
114# define TLB_M 0x00000002 /* Memory is coherent */
115# define TLB_G 0x00000001 /* Memory is guarded from prefetch */
116
117# endif /* __KERNEL__ */
118# endif /* CONFIG_MMU */
19#endif /* _ASM_MICROBLAZE_MMU_H */ 119#endif /* _ASM_MICROBLAZE_MMU_H */
diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h
index 150ca01b74ba..385fed16bbfb 100644
--- a/arch/microblaze/include/asm/mmu_context.h
+++ b/arch/microblaze/include/asm/mmu_context.h
@@ -1,21 +1,5 @@
1/* 1#ifdef CONFIG_MMU
2 * Copyright (C) 2006 Atmark Techno, Inc. 2# include "mmu_context_mm.h"
3 * 3#else
4 * This file is subject to the terms and conditions of the GNU General Public 4# include "mmu_context_no.h"
5 * License. See the file "COPYING" in the main directory of this archive 5#endif
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
10#define _ASM_MICROBLAZE_MMU_CONTEXT_H
11
12# define init_new_context(tsk, mm) ({ 0; })
13
14# define enter_lazy_tlb(mm, tsk) do {} while (0)
15# define change_mm_context(old, ctx, _pml4) do {} while (0)
16# define destroy_context(mm) do {} while (0)
17# define deactivate_mm(tsk, mm) do {} while (0)
18# define switch_mm(prev, next, tsk) do {} while (0)
19# define activate_mm(prev, next) do {} while (0)
20
21#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h
new file mode 100644
index 000000000000..3e5c254e8d1c
--- /dev/null
+++ b/arch/microblaze/include/asm/mmu_context_mm.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
12#define _ASM_MICROBLAZE_MMU_CONTEXT_H
13
14#include <asm/atomic.h>
15#include <asm/bitops.h>
16#include <asm/mmu.h>
17#include <asm-generic/mm_hooks.h>
18
19# ifdef __KERNEL__
20/*
21 * This function defines the mapping from contexts to VSIDs (virtual
22 * segment IDs). We use a skew on both the context and the high 4 bits
23 * of the 32-bit virtual address (the "effective segment ID") in order
24 * to spread out the entries in the MMU hash table.
25 */
26# define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
27 & 0xffffff)
28
29/*
30 MicroBlaze has 256 contexts, so we can just rotate through these
31 as a way of "switching" contexts. If the TID of the TLB is zero,
32 the PID/TID comparison is disabled, so we can use a TID of zero
33 to represent all kernel pages as shared among all contexts.
34 */
35
36static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
37{
38}
39
40# define NO_CONTEXT 256
41# define LAST_CONTEXT 255
42# define FIRST_CONTEXT 1
43
44/*
45 * Set the current MMU context.
46 * This is done byloading up the segment registers for the user part of the
47 * address space.
48 *
49 * Since the PGD is immediately available, it is much faster to simply
50 * pass this along as a second parameter, which is required for 8xx and
51 * can be used for debugging on all processors (if you happen to have
52 * an Abatron).
53 */
54extern void set_context(mm_context_t context, pgd_t *pgd);
55
56/*
57 * Bitmap of contexts in use.
58 * The size of this bitmap is LAST_CONTEXT + 1 bits.
59 */
60extern unsigned long context_map[];
61
62/*
63 * This caches the next context number that we expect to be free.
64 * Its use is an optimization only, we can't rely on this context
65 * number to be free, but it usually will be.
66 */
67extern mm_context_t next_mmu_context;
68
69/*
70 * Since we don't have sufficient contexts to give one to every task
71 * that could be in the system, we need to be able to steal contexts.
72 * These variables support that.
73 */
74extern atomic_t nr_free_contexts;
75extern struct mm_struct *context_mm[LAST_CONTEXT+1];
76extern void steal_context(void);
77
78/*
79 * Get a new mmu context for the address space described by `mm'.
80 */
81static inline void get_mmu_context(struct mm_struct *mm)
82{
83 mm_context_t ctx;
84
85 if (mm->context != NO_CONTEXT)
86 return;
87 while (atomic_dec_if_positive(&nr_free_contexts) < 0)
88 steal_context();
89 ctx = next_mmu_context;
90 while (test_and_set_bit(ctx, context_map)) {
91 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
92 if (ctx > LAST_CONTEXT)
93 ctx = 0;
94 }
95 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
96 mm->context = ctx;
97 context_mm[ctx] = mm;
98}
99
100/*
101 * Set up the context for a new address space.
102 */
103# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
104
105/*
106 * We're finished using the context for an address space.
107 */
108static inline void destroy_context(struct mm_struct *mm)
109{
110 if (mm->context != NO_CONTEXT) {
111 clear_bit(mm->context, context_map);
112 mm->context = NO_CONTEXT;
113 atomic_inc(&nr_free_contexts);
114 }
115}
116
117static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
118 struct task_struct *tsk)
119{
120 tsk->thread.pgdir = next->pgd;
121 get_mmu_context(next);
122 set_context(next->context, next->pgd);
123}
124
125/*
126 * After we have set current->mm to a new value, this activates
127 * the context for the new mm so we see the new mappings.
128 */
129static inline void activate_mm(struct mm_struct *active_mm,
130 struct mm_struct *mm)
131{
132 current->thread.pgdir = mm->pgd;
133 get_mmu_context(mm);
134 set_context(mm->context, mm->pgd);
135}
136
137extern void mmu_context_init(void);
138
139# endif /* __KERNEL__ */
140#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/mmu_context_no.h b/arch/microblaze/include/asm/mmu_context_no.h
new file mode 100644
index 000000000000..ba5567190154
--- /dev/null
+++ b/arch/microblaze/include/asm/mmu_context_no.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
12#define _ASM_MICROBLAZE_MMU_CONTEXT_H
13
14# define init_new_context(tsk, mm) ({ 0; })
15
16# define enter_lazy_tlb(mm, tsk) do {} while (0)
17# define change_mm_context(old, ctx, _pml4) do {} while (0)
18# define destroy_context(mm) do {} while (0)
19# define deactivate_mm(tsk, mm) do {} while (0)
20# define switch_mm(prev, next, tsk) do {} while (0)
21# define activate_mm(prev, next) do {} while (0)
22
23#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 962c210e5b9a..72aceae88680 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (C) 2008 Michal Simek 2 * VM ops
3 * Copyright (C) 2008 PetaLogix 3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 6 * Copyright (C) 2006 Atmark Techno, Inc.
5 * Changes for MMU support: 7 * Changes for MMU support:
6 * Copyright (C) 2007 Xilinx, Inc. All rights reserved. 8 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
@@ -15,14 +17,15 @@
15 17
16#include <linux/pfn.h> 18#include <linux/pfn.h>
17#include <asm/setup.h> 19#include <asm/setup.h>
20#include <linux/const.h>
21
22#ifdef __KERNEL__
18 23
19/* PAGE_SHIFT determines the page size */ 24/* PAGE_SHIFT determines the page size */
20#define PAGE_SHIFT (12) 25#define PAGE_SHIFT (12)
21#define PAGE_SIZE (1UL << PAGE_SHIFT) 26#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
22#define PAGE_MASK (~(PAGE_SIZE-1)) 27#define PAGE_MASK (~(PAGE_SIZE-1))
23 28
24#ifdef __KERNEL__
25
26#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
27 30
28#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) 31#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
@@ -35,6 +38,7 @@
35/* align addr on a size boundary - adjust address up if needed */ 38/* align addr on a size boundary - adjust address up if needed */
36#define _ALIGN(addr, size) _ALIGN_UP(addr, size) 39#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
37 40
41#ifndef CONFIG_MMU
38/* 42/*
39 * PAGE_OFFSET -- the first address of the first page of memory. When not 43 * PAGE_OFFSET -- the first address of the first page of memory. When not
40 * using MMU this corresponds to the first free page in physical memory (aligned 44 * using MMU this corresponds to the first free page in physical memory (aligned
@@ -43,15 +47,44 @@
43extern unsigned int __page_offset; 47extern unsigned int __page_offset;
44#define PAGE_OFFSET __page_offset 48#define PAGE_OFFSET __page_offset
45 49
46#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 50#else /* CONFIG_MMU */
47#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
48#define free_user_page(page, addr) free_page(addr)
49 51
50#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) 52/*
53 * PAGE_OFFSET -- the first address of the first page of memory. With MMU
54 * it is set to the kernel start address (aligned on a page boundary).
55 *
56 * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
57 * in arch/microblaze/Makefile.
58 */
59#define PAGE_OFFSET CONFIG_KERNEL_START
51 60
61/*
62 * MAP_NR -- given an address, calculate the index of the page struct which
63 * points to the address's page.
64 */
65#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
52 66
53#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) 67/*
54#define copy_user_page(vto, vfrom, vaddr, topg) \ 68 * The basic type of a PTE - 32 bit physical addressing.
69 */
70typedef unsigned long pte_basic_t;
71#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
72#define PTE_FMT "%.8lx"
73
74#endif /* CONFIG_MMU */
75
76# ifndef CONFIG_MMU
77# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
78# define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
79# define free_user_page(page, addr) free_page(addr)
80# else /* CONFIG_MMU */
81extern void copy_page(void *to, void *from);
82# endif /* CONFIG_MMU */
83
84# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
85
86# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
87# define copy_user_page(vto, vfrom, vaddr, topg) \
55 memcpy((vto), (vfrom), PAGE_SIZE) 88 memcpy((vto), (vfrom), PAGE_SIZE)
56 89
57/* 90/*
@@ -60,21 +93,32 @@ extern unsigned int __page_offset;
60typedef struct page *pgtable_t; 93typedef struct page *pgtable_t;
61typedef struct { unsigned long pte; } pte_t; 94typedef struct { unsigned long pte; } pte_t;
62typedef struct { unsigned long pgprot; } pgprot_t; 95typedef struct { unsigned long pgprot; } pgprot_t;
96/* FIXME this can depend on linux kernel version */
97# ifdef CONFIG_MMU
98typedef struct { unsigned long pmd; } pmd_t;
99typedef struct { unsigned long pgd; } pgd_t;
100# else /* CONFIG_MMU */
63typedef struct { unsigned long ste[64]; } pmd_t; 101typedef struct { unsigned long ste[64]; } pmd_t;
64typedef struct { pmd_t pue[1]; } pud_t; 102typedef struct { pmd_t pue[1]; } pud_t;
65typedef struct { pud_t pge[1]; } pgd_t; 103typedef struct { pud_t pge[1]; } pgd_t;
104# endif /* CONFIG_MMU */
66 105
106# define pte_val(x) ((x).pte)
107# define pgprot_val(x) ((x).pgprot)
67 108
68#define pte_val(x) ((x).pte) 109# ifdef CONFIG_MMU
69#define pgprot_val(x) ((x).pgprot) 110# define pmd_val(x) ((x).pmd)
70#define pmd_val(x) ((x).ste[0]) 111# define pgd_val(x) ((x).pgd)
71#define pud_val(x) ((x).pue[0]) 112# else /* CONFIG_MMU */
72#define pgd_val(x) ((x).pge[0]) 113# define pmd_val(x) ((x).ste[0])
114# define pud_val(x) ((x).pue[0])
115# define pgd_val(x) ((x).pge[0])
116# endif /* CONFIG_MMU */
73 117
74#define __pte(x) ((pte_t) { (x) }) 118# define __pte(x) ((pte_t) { (x) })
75#define __pmd(x) ((pmd_t) { (x) }) 119# define __pmd(x) ((pmd_t) { (x) })
76#define __pgd(x) ((pgd_t) { (x) }) 120# define __pgd(x) ((pgd_t) { (x) })
77#define __pgprot(x) ((pgprot_t) { (x) }) 121# define __pgprot(x) ((pgprot_t) { (x) })
78 122
79/** 123/**
80 * Conversions for virtual address, physical address, pfn, and struct 124 * Conversions for virtual address, physical address, pfn, and struct
@@ -94,44 +138,80 @@ extern unsigned long max_low_pfn;
94extern unsigned long min_low_pfn; 138extern unsigned long min_low_pfn;
95extern unsigned long max_pfn; 139extern unsigned long max_pfn;
96 140
97#define __pa(vaddr) ((unsigned long) (vaddr)) 141extern unsigned long memory_start;
98#define __va(paddr) ((void *) (paddr)) 142extern unsigned long memory_end;
143extern unsigned long memory_size;
99 144
100#define phys_to_pfn(phys) (PFN_DOWN(phys)) 145extern int page_is_ram(unsigned long pfn);
101#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
102 146
103#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) 147# define phys_to_pfn(phys) (PFN_DOWN(phys))
104#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 148# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
105 149
106#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 150# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
107#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 151# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
108 152
109#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) 153# ifdef CONFIG_MMU
110#define page_to_bus(page) (page_to_phys(page)) 154# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
111#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) 155# else /* CONFIG_MMU */
156# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
157# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
158# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
159# define page_to_bus(page) (page_to_phys(page))
160# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
161# endif /* CONFIG_MMU */
112 162
113extern unsigned int memory_start; 163# ifndef CONFIG_MMU
114extern unsigned int memory_end; 164# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
115extern unsigned int memory_size; 165# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
166# else /* CONFIG_MMU */
167# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
168# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
169# define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
170# endif /* CONFIG_MMU */
116 171
117#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) 172# endif /* __ASSEMBLY__ */
118 173
119#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 174#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
120 175
121#else
122#define tophys(rd, rs) (addik rd, rs, 0)
123#define tovirt(rd, rs) (addik rd, rs, 0)
124#endif /* __ASSEMBLY__ */
125 176
126#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) 177# ifndef CONFIG_MMU
178# define __pa(vaddr) ((unsigned long) (vaddr))
179# define __va(paddr) ((void *) (paddr))
180# else /* CONFIG_MMU */
181# define __pa(x) __virt_to_phys((unsigned long)(x))
182# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
183# endif /* CONFIG_MMU */
184
127 185
128/* Convert between virtual and physical address for MMU. */ 186/* Convert between virtual and physical address for MMU. */
129/* Handle MicroBlaze processor with virtual memory. */ 187/* Handle MicroBlaze processor with virtual memory. */
188#ifndef CONFIG_MMU
130#define __virt_to_phys(addr) addr 189#define __virt_to_phys(addr) addr
131#define __phys_to_virt(addr) addr 190#define __phys_to_virt(addr) addr
191#define tophys(rd, rs) addik rd, rs, 0
192#define tovirt(rd, rs) addik rd, rs, 0
193#else
194#define __virt_to_phys(addr) \
195 ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
196#define __phys_to_virt(addr) \
197 ((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
198#define tophys(rd, rs) \
199 addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
200#define tovirt(rd, rs) \
201 addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
202#endif /* CONFIG_MMU */
132 203
133#define TOPHYS(addr) __virt_to_phys(addr) 204#define TOPHYS(addr) __virt_to_phys(addr)
134 205
206#ifdef CONFIG_MMU
207#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
208#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
209#endif
210
211#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
212 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
213#endif /* CONFIG_MMU */
214
135#endif /* __KERNEL__ */ 215#endif /* __KERNEL__ */
136 216
137#include <asm-generic/memory_model.h> 217#include <asm-generic/memory_model.h>
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 2a4b35484010..59a757e46ba5 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,195 @@
9#ifndef _ASM_MICROBLAZE_PGALLOC_H 11#ifndef _ASM_MICROBLAZE_PGALLOC_H
10#define _ASM_MICROBLAZE_PGALLOC_H 12#define _ASM_MICROBLAZE_PGALLOC_H
11 13
14#ifdef CONFIG_MMU
15
16#include <linux/kernel.h> /* For min/max macros */
17#include <linux/highmem.h>
18#include <asm/setup.h>
19#include <asm/io.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22
23#define PGDIR_ORDER 0
24
25/*
26 * This is handled very differently on MicroBlaze since out page tables
27 * are all 0's and I want to be able to use these zero'd pages elsewhere
28 * as well - it gives us quite a speedup.
29 * -- Cort
30 */
31extern struct pgtable_cache_struct {
32 unsigned long *pgd_cache;
33 unsigned long *pte_cache;
34 unsigned long pgtable_cache_sz;
35} quicklists;
36
37#define pgd_quicklist (quicklists.pgd_cache)
38#define pmd_quicklist ((unsigned long *)0)
39#define pte_quicklist (quicklists.pte_cache)
40#define pgtable_cache_size (quicklists.pgtable_cache_sz)
41
42extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
43extern atomic_t zero_sz; /* # currently pre-zero'd pages */
44extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
45extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
46extern atomic_t zerototal; /* # pages zero'd over time */
47
48#define zero_quicklist (zero_cache)
49#define zero_cache_sz (zero_sz)
50#define zero_cache_calls (zeropage_calls)
51#define zero_cache_hits (zeropage_hits)
52#define zero_cache_total (zerototal)
53
54/*
55 * return a pre-zero'd page from the list,
56 * return NULL if none available -- Cort
57 */
58extern unsigned long get_zero_page_fast(void);
59
60extern void __bad_pte(pmd_t *pmd);
61
62extern inline pgd_t *get_pgd_slow(void)
63{
64 pgd_t *ret;
65
66 ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
67 if (ret != NULL)
68 clear_page(ret);
69 return ret;
70}
71
72extern inline pgd_t *get_pgd_fast(void)
73{
74 unsigned long *ret;
75
76 ret = pgd_quicklist;
77 if (ret != NULL) {
78 pgd_quicklist = (unsigned long *)(*ret);
79 ret[0] = 0;
80 pgtable_cache_size--;
81 } else
82 ret = (unsigned long *)get_pgd_slow();
83 return (pgd_t *)ret;
84}
85
86extern inline void free_pgd_fast(pgd_t *pgd)
87{
88 *(unsigned long **)pgd = pgd_quicklist;
89 pgd_quicklist = (unsigned long *) pgd;
90 pgtable_cache_size++;
91}
92
93extern inline void free_pgd_slow(pgd_t *pgd)
94{
95 free_page((unsigned long)pgd);
96}
97
98#define pgd_free(mm, pgd) free_pgd_fast(pgd)
99#define pgd_alloc(mm) get_pgd_fast()
100
101#define pmd_pgtable(pmd) pmd_page(pmd)
102
103/*
104 * We don't have any real pmd's, and this code never triggers because
105 * the pgd will always be present..
106 */
107#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
108#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
109/* FIXME two definition - look below */
110#define pmd_free(mm, x) do { } while (0)
111#define pgd_populate(mm, pmd, pte) BUG()
112
113static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
114 unsigned long address)
115{
116 pte_t *pte;
117 extern int mem_init_done;
118 extern void *early_get_page(void);
119 if (mem_init_done) {
120 pte = (pte_t *)__get_free_page(GFP_KERNEL |
121 __GFP_REPEAT | __GFP_ZERO);
122 } else {
123 pte = (pte_t *)early_get_page();
124 if (pte)
125 clear_page(pte);
126 }
127 return pte;
128}
129
130static inline struct page *pte_alloc_one(struct mm_struct *mm,
131 unsigned long address)
132{
133 struct page *ptepage;
134
135#ifdef CONFIG_HIGHPTE
136 int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
137#else
138 int flags = GFP_KERNEL | __GFP_REPEAT;
139#endif
140
141 ptepage = alloc_pages(flags, 0);
142 if (ptepage)
143 clear_highpage(ptepage);
144 return ptepage;
145}
146
147static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
148 unsigned long address)
149{
150 unsigned long *ret;
151
152 ret = pte_quicklist;
153 if (ret != NULL) {
154 pte_quicklist = (unsigned long *)(*ret);
155 ret[0] = 0;
156 pgtable_cache_size--;
157 }
158 return (pte_t *)ret;
159}
160
161extern inline void pte_free_fast(pte_t *pte)
162{
163 *(unsigned long **)pte = pte_quicklist;
164 pte_quicklist = (unsigned long *) pte;
165 pgtable_cache_size++;
166}
167
168extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
169{
170 free_page((unsigned long)pte);
171}
172
173extern inline void pte_free_slow(struct page *ptepage)
174{
175 __free_page(ptepage);
176}
177
178extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
179{
180 __free_page(ptepage);
181}
182
183#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
184
185#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
186
187#define pmd_populate_kernel(mm, pmd, pte) \
188 (pmd_val(*(pmd)) = (unsigned long) (pte))
189
190/*
191 * We don't have any real pmd's, and this code never triggers because
192 * the pgd will always be present..
193 */
194#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
195/*#define pmd_free(mm, x) do { } while (0)*/
196#define __pmd_free_tlb(tlb, x) do { } while (0)
197#define pgd_populate(mm, pmd, pte) BUG()
198
199extern int do_check_pgt_cache(int, int);
200
201#endif /* CONFIG_MMU */
202
12#define check_pgt_cache() do {} while (0) 203#define check_pgt_cache() do {} while (0)
13 204
14#endif /* _ASM_MICROBLAZE_PGALLOC_H */ 205#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4df31e46568e..4c57a586a989 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,8 @@
14#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
15 remap_pfn_range(vma, vaddr, pfn, size, prot) 17 remap_pfn_range(vma, vaddr, pfn, size, prot)
16 18
19#ifndef CONFIG_MMU
20
17#define pgd_present(pgd) (1) /* pages are always present on non MMU */ 21#define pgd_present(pgd) (1) /* pages are always present on non MMU */
18#define pgd_none(pgd) (0) 22#define pgd_none(pgd) (0)
19#define pgd_bad(pgd) (0) 23#define pgd_bad(pgd) (0)
@@ -27,6 +31,8 @@
27#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ 31#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */
28#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ 32#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
29 33
34#define pgprot_noncached(x) (x)
35
30#define __swp_type(x) (0) 36#define __swp_type(x) (0)
31#define __swp_offset(x) (0) 37#define __swp_offset(x) (0)
32#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) 38#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
@@ -45,6 +51,538 @@ static inline int pte_file(pte_t pte) { return 0; }
45 51
46#define arch_enter_lazy_cpu_mode() do {} while (0) 52#define arch_enter_lazy_cpu_mode() do {} while (0)
47 53
54#else /* CONFIG_MMU */
55
56#include <asm-generic/4level-fixup.h>
57
58#ifdef __KERNEL__
59#ifndef __ASSEMBLY__
60
61#include <linux/sched.h>
62#include <linux/threads.h>
63#include <asm/processor.h> /* For TASK_SIZE */
64#include <asm/mmu.h>
65#include <asm/page.h>
66
67#define FIRST_USER_ADDRESS 0
68
69extern unsigned long va_to_phys(unsigned long address);
70extern pte_t *va_to_pte(unsigned long address);
71extern unsigned long ioremap_bot, ioremap_base;
72
73/*
74 * The following only work if pte_present() is true.
75 * Undefined behaviour if not..
76 */
77
78static inline int pte_special(pte_t pte) { return 0; }
79
80static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
81
82/* Start and end of the vmalloc area. */
83/* Make sure to map the vmalloc area above the pinned kernel memory area
84 of 32Mb. */
85#define VMALLOC_START (CONFIG_KERNEL_START + \
86 max(32 * 1024 * 1024UL, memory_size))
87#define VMALLOC_END ioremap_bot
88#define VMALLOC_VMADDR(x) ((unsigned long)(x))
89
90#endif /* __ASSEMBLY__ */
91
92/*
93 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
94 * table containing PTEs, together with a set of 16 segment registers, to
95 * define the virtual to physical address mapping.
96 *
97 * We use the hash table as an extended TLB, i.e. a cache of currently
98 * active mappings. We maintain a two-level page table tree, much
99 * like that used by the i386, for the sake of the Linux memory
100 * management code. Low-level assembler code in hashtable.S
101 * (procedure hash_page) is responsible for extracting ptes from the
102 * tree and putting them into the hash table when necessary, and
103 * updating the accessed and modified bits in the page table tree.
104 */
105
106/*
107 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
108 * instruction and data sides share a unified, 64-entry, semi-associative
109 * TLB which is maintained totally under software control. In addition, the
110 * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
111 * TLB which serves as a first level to the shared TLB. These two TLBs are
112 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
113 */
114
115/*
116 * The normal case is that PTEs are 32-bits and we have a 1-page
117 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
118 *
119 */
120
121/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
122#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
123#define PMD_SIZE (1UL << PMD_SHIFT)
124#define PMD_MASK (~(PMD_SIZE-1))
125
126/* PGDIR_SHIFT determines what a top-level page table entry can map */
127#define PGDIR_SHIFT PMD_SHIFT
128#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
129#define PGDIR_MASK (~(PGDIR_SIZE-1))
130
131/*
132 * entries per page directory level: our page-table tree is two-level, so
133 * we don't really have any PMD directory.
134 */
135#define PTRS_PER_PTE (1 << PTE_SHIFT)
136#define PTRS_PER_PMD 1
137#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
138
139#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
140#define FIRST_USER_PGD_NR 0
141
142#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
143#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
144
145#define pte_ERROR(e) \
146 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
147 __FILE__, __LINE__, pte_val(e))
148#define pmd_ERROR(e) \
149 printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
150 __FILE__, __LINE__, pmd_val(e))
151#define pgd_ERROR(e) \
152 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
153 __FILE__, __LINE__, pgd_val(e))
154
155/*
156 * Bits in a linux-style PTE. These match the bits in the
157 * (hardware-defined) PTE as closely as possible.
158 */
159
160/* There are several potential gotchas here. The hardware TLBLO
161 * field looks like this:
162 *
163 * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
164 * RPN..................... 0 0 EX WR ZSEL....... W I M G
165 *
166 * Where possible we make the Linux PTE bits match up with this
167 *
168 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
169 * support down to 1k pages), this is done in the TLBMiss exception
170 * handler.
171 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
172 * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
173 * miss handler. Bit 27 is PAGE_USER, thus selecting the correct
174 * zone.
175 * - PRESENT *must* be in the bottom two bits because swap cache
176 * entries use the top 30 bits. Because 4xx doesn't support SMP
177 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
178 * is cleared in the TLB miss handler before the TLB entry is loaded.
179 * - All other bits of the PTE are loaded into TLBLO without
180 * * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
181 * software PTE bits. We actually use use bits 21, 24, 25, and
182 * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
183 * PRESENT.
184 */
185
186/* Definitions for MicroBlaze. */
187#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
188#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
189#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
190#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
191#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
192#define _PAGE_RW 0x040 /* software: Writes permitted */
193#define _PAGE_DIRTY 0x080 /* software: dirty page */
194#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
195#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
196#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
197#define _PMD_PRESENT PAGE_MASK
198
199/*
200 * Some bits are unused...
201 */
202#ifndef _PAGE_HASHPTE
203#define _PAGE_HASHPTE 0
204#endif
205#ifndef _PTE_NONE_MASK
206#define _PTE_NONE_MASK 0
207#endif
208#ifndef _PAGE_SHARED
209#define _PAGE_SHARED 0
210#endif
211#ifndef _PAGE_HWWRITE
212#define _PAGE_HWWRITE 0
213#endif
214#ifndef _PAGE_HWEXEC
215#define _PAGE_HWEXEC 0
216#endif
217#ifndef _PAGE_EXEC
218#define _PAGE_EXEC 0
219#endif
220
221#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
222
223/*
224 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
225 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
226 * to have it in the Linux PTE, and in fact the bit could be reused for
227 * another purpose. -- paulus.
228 */
229#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
230#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
231
232#define _PAGE_KERNEL \
233 (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)
234
235#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
236
237#define PAGE_NONE __pgprot(_PAGE_BASE)
238#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
239#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
240#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
241#define PAGE_SHARED_X \
242 __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
243#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
244#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
245
246#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
247#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
248#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)
249
250/*
251 * We consider execute permission the same as read.
252 * Also, write permissions imply read permissions.
253 */
254#define __P000 PAGE_NONE
255#define __P001 PAGE_READONLY_X
256#define __P010 PAGE_COPY
257#define __P011 PAGE_COPY_X
258#define __P100 PAGE_READONLY
259#define __P101 PAGE_READONLY_X
260#define __P110 PAGE_COPY
261#define __P111 PAGE_COPY_X
262
263#define __S000 PAGE_NONE
264#define __S001 PAGE_READONLY_X
265#define __S010 PAGE_SHARED
266#define __S011 PAGE_SHARED_X
267#define __S100 PAGE_READONLY
268#define __S101 PAGE_READONLY_X
269#define __S110 PAGE_SHARED
270#define __S111 PAGE_SHARED_X
271
272#ifndef __ASSEMBLY__
273/*
274 * ZERO_PAGE is a global shared page that is always zero: used
275 * for zero-mapped memory areas etc..
276 */
277extern unsigned long empty_zero_page[1024];
278#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
279
280#endif /* __ASSEMBLY__ */
281
282#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
283#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
284#define pte_clear(mm, addr, ptep) \
285 do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
286
287#define pmd_none(pmd) (!pmd_val(pmd))
288#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
289#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
290#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
291
292#define pte_page(x) (mem_map + (unsigned long) \
293 ((pte_val(x) - memory_start) >> PAGE_SHIFT))
294#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
295
296#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
297
298#define pfn_pte(pfn, prot) \
299 __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
300
301#ifndef __ASSEMBLY__
302/*
303 * The "pgd_xxx()" functions here are trivial for a folded two-level
304 * setup: the pgd is never bad, and a pmd always exists (as it's folded
305 * into the pgd entry)
306 */
307static inline int pgd_none(pgd_t pgd) { return 0; }
308static inline int pgd_bad(pgd_t pgd) { return 0; }
309static inline int pgd_present(pgd_t pgd) { return 1; }
310#define pgd_clear(xp) do { } while (0)
311#define pgd_page(pgd) \
312 ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
313
314/*
315 * The following only work if pte_present() is true.
316 * Undefined behaviour if not..
317 */
318static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
319static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
320static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
321static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
322static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
323/* FIXME */
324static inline int pte_file(pte_t pte) { return 0; }
325
326static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
327static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
328
329static inline pte_t pte_rdprotect(pte_t pte) \
330 { pte_val(pte) &= ~_PAGE_USER; return pte; }
331static inline pte_t pte_wrprotect(pte_t pte) \
332 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
333static inline pte_t pte_exprotect(pte_t pte) \
334 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
335static inline pte_t pte_mkclean(pte_t pte) \
336 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
337static inline pte_t pte_mkold(pte_t pte) \
338 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
339
340static inline pte_t pte_mkread(pte_t pte) \
341 { pte_val(pte) |= _PAGE_USER; return pte; }
342static inline pte_t pte_mkexec(pte_t pte) \
343 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
344static inline pte_t pte_mkwrite(pte_t pte) \
345 { pte_val(pte) |= _PAGE_RW; return pte; }
346static inline pte_t pte_mkdirty(pte_t pte) \
347 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
348static inline pte_t pte_mkyoung(pte_t pte) \
349 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
350
351/*
352 * Conversion functions: convert a page and protection to a page entry,
353 * and a page entry and page directory to the page they refer to.
354 */
355
356static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
357{
358 pte_t pte;
359 pte_val(pte) = physpage | pgprot_val(pgprot);
360 return pte;
361}
362
363#define mk_pte(page, pgprot) \
364({ \
365 pte_t pte; \
366 pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
367 pgprot_val(pgprot); \
368 pte; \
369})
370
371static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
372{
373 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
374 return pte;
375}
376
377/*
378 * Atomic PTE updates.
379 *
380 * pte_update clears and sets bit atomically, and returns
381 * the old pte value.
382 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
383 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
384 */
385static inline unsigned long pte_update(pte_t *p, unsigned long clr,
386 unsigned long set)
387{
388 unsigned long old, tmp, msr;
389
390 __asm__ __volatile__("\
391 msrclr %2, 0x2\n\
392 nop\n\
393 lw %0, %4, r0\n\
394 andn %1, %0, %5\n\
395 or %1, %1, %6\n\
396 sw %1, %4, r0\n\
397 mts rmsr, %2\n\
398 nop"
399 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
400 : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
401 : "cc");
402
403 return old;
404}
405
406/*
407 * set_pte stores a linux PTE into the linux page table.
408 */
409static inline void set_pte(struct mm_struct *mm, unsigned long addr,
410 pte_t *ptep, pte_t pte)
411{
412 *ptep = pte;
413}
414
415static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
416 pte_t *ptep, pte_t pte)
417{
418 *ptep = pte;
419}
420
421static inline int ptep_test_and_clear_young(struct mm_struct *mm,
422 unsigned long addr, pte_t *ptep)
423{
424 return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
425}
426
427static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
428 unsigned long addr, pte_t *ptep)
429{
430 return (pte_update(ptep, \
431 (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
432}
433
434static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
435 unsigned long addr, pte_t *ptep)
436{
437 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
438}
439
440/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
441 unsigned long addr, pte_t *ptep)
442{
443 pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
444}*/
445
446static inline void ptep_mkdirty(struct mm_struct *mm,
447 unsigned long addr, pte_t *ptep)
448{
449 pte_update(ptep, 0, _PAGE_DIRTY);
450}
451
452/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
453
454/* Convert pmd entry to page */
455/* our pmd entry is an effective address of pte table*/
456/* returns effective address of the pmd entry*/
457#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
458
459/* returns struct *page of the pmd entry*/
460#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
461
462/* to find an entry in a kernel page-table-directory */
463#define pgd_offset_k(address) pgd_offset(&init_mm, address)
464
465/* to find an entry in a page-table-directory */
466#define pgd_index(address) ((address) >> PGDIR_SHIFT)
467#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
468
469/* Find an entry in the second-level page table.. */
470static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
471{
472 return (pmd_t *) dir;
473}
474
475/* Find an entry in the third-level page table.. */
476#define pte_index(address) \
477 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
478#define pte_offset_kernel(dir, addr) \
479 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
480#define pte_offset_map(dir, addr) \
481 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
482#define pte_offset_map_nested(dir, addr) \
483 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
484
485#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
486#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
487
488/* Encode and decode a nonlinear file mapping entry */
489#define PTE_FILE_MAX_BITS 29
490#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
491#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
492
493extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
494
495/*
496 * When flushing the tlb entry for a page, we also need to flush the hash
497 * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
498 */
499extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
500
501/* Add an HPTE to the hash table */
502extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
503
504/*
505 * Encode and decode a swap entry.
506 * Note that the bits we use in a PTE for representing a swap entry
507 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
508 * (if used). -- paulus
509 */
510#define __swp_type(entry) ((entry).val & 0x3f)
511#define __swp_offset(entry) ((entry).val >> 6)
512#define __swp_entry(type, offset) \
513 ((swp_entry_t) { (type) | ((offset) << 6) })
514#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
515#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
516
517
518/* CONFIG_APUS */
519/* For virtual address to physical address conversion */
520extern void cache_clear(__u32 addr, int length);
521extern void cache_push(__u32 addr, int length);
522extern int mm_end_of_chunk(unsigned long addr, int len);
523extern unsigned long iopa(unsigned long addr);
524/* extern unsigned long mm_ptov(unsigned long addr) \
525 __attribute__ ((const)); TBD */
526
527/* Values for nocacheflag and cmode */
528/* These are not used by the APUS kernel_map, but prevents
529 * compilation errors.
530 */
531#define IOMAP_FULL_CACHING 0
532#define IOMAP_NOCACHE_SER 1
533#define IOMAP_NOCACHE_NONSER 2
534#define IOMAP_NO_COPYBACK 3
535
536/*
537 * Map some physical address range into the kernel address space.
538 */
539extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
540 int nocacheflag, unsigned long *memavailp);
541
542/*
543 * Set cache mode of (kernel space) address range.
544 */
545extern void kernel_set_cachemode(unsigned long address, unsigned long size,
546 unsigned int cmode);
547
548/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
549#define kern_addr_valid(addr) (1)
550
551#define io_remap_page_range remap_page_range
552
553/*
554 * No page table caches to initialise
555 */
556#define pgtable_cache_init() do { } while (0)
557
558void do_page_fault(struct pt_regs *regs, unsigned long address,
559 unsigned long error_code);
560
561void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
562 unsigned int size, int flags);
563
564void __init adjust_total_lowmem(void);
565void mapin_ram(void);
566int map_page(unsigned long va, phys_addr_t pa, int flags);
567
568extern int mem_init_done;
569extern unsigned long ioremap_base;
570extern unsigned long ioremap_bot;
571
572asmlinkage void __init mmu_init(void);
573
574void __init *early_get_page(void);
575
576void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
577void consistent_free(void *vaddr);
578void consistent_sync(void *vaddr, size_t size, int direction);
579void consistent_sync_page(struct page *page, unsigned long offset,
580 size_t size, int direction);
581#endif /* __ASSEMBLY__ */
582#endif /* __KERNEL__ */
583
584#endif /* CONFIG_MMU */
585
48#ifndef __ASSEMBLY__ 586#ifndef __ASSEMBLY__
49#include <asm-generic/pgtable.h> 587#include <asm-generic/pgtable.h>
50 588
diff --git a/arch/microblaze/include/asm/posix_types.h b/arch/microblaze/include/asm/posix_types.h
index b4df41c5dde2..8c758b231f37 100644
--- a/arch/microblaze/include/asm/posix_types.h
+++ b/arch/microblaze/include/asm/posix_types.h
@@ -16,7 +16,7 @@
16 */ 16 */
17 17
18typedef unsigned long __kernel_ino_t; 18typedef unsigned long __kernel_ino_t;
19typedef unsigned int __kernel_mode_t; 19typedef unsigned short __kernel_mode_t;
20typedef unsigned int __kernel_nlink_t; 20typedef unsigned int __kernel_nlink_t;
21typedef long __kernel_off_t; 21typedef long __kernel_off_t;
22typedef int __kernel_pid_t; 22typedef int __kernel_pid_t;
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 9329029d2614..563c6b9453f0 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008 Michal Simek 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op;
26# define cpu_sleep() do {} while (0) 26# define cpu_sleep() do {} while (0)
27# define prepare_to_copy(tsk) do {} while (0) 27# define prepare_to_copy(tsk) do {} while (0)
28 28
29# endif /* __ASSEMBLY__ */
30
31#define task_pt_regs(tsk) \ 29#define task_pt_regs(tsk) \
32 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) 30 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
33 31
34/* Do necessary setup to start up a newly executed thread. */ 32/* Do necessary setup to start up a newly executed thread. */
35void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp); 33void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
36 34
35# endif /* __ASSEMBLY__ */
36
37# ifndef CONFIG_MMU
37/* 38/*
38 * User space process size: memory size 39 * User space process size: memory size
39 * 40 *
@@ -85,4 +86,90 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
85# define KSTK_EIP(tsk) (0) 86# define KSTK_EIP(tsk) (0)
86# define KSTK_ESP(tsk) (0) 87# define KSTK_ESP(tsk) (0)
87 88
89# else /* CONFIG_MMU */
90
91/*
92 * This is used to define STACK_TOP, and with MMU it must be below
93 * kernel base to select the correct PGD when handling MMU exceptions.
94 */
95# define TASK_SIZE (CONFIG_KERNEL_START)
96
97/*
98 * This decides where the kernel will search for a free chunk of vm
99 * space during mmap's.
100 */
101# define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
102
103# define THREAD_KSP 0
104
105# ifndef __ASSEMBLY__
106
107/*
108 * Default implementation of macro that returns current
109 * instruction pointer ("program counter").
110 */
111# define current_text_addr() ({ __label__ _l; _l: &&_l; })
112
113/* If you change this, you must change the associated assembly-languages
114 * constants defined below, THREAD_*.
115 */
116struct thread_struct {
117 /* kernel stack pointer (must be first field in structure) */
118 unsigned long ksp;
119 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
120 void *pgdir; /* root of page-table tree */
121 struct pt_regs *regs; /* Pointer to saved register state */
122};
123
124# define INIT_THREAD { \
125 .ksp = sizeof init_stack + (unsigned long)init_stack, \
126 .pgdir = swapper_pg_dir, \
127}
128
129/* Do necessary setup to start up a newly executed thread. */
130void start_thread(struct pt_regs *regs,
131 unsigned long pc, unsigned long usp);
132
133/* Free all resources held by a thread. */
134extern inline void release_thread(struct task_struct *dead_task)
135{
136}
137
138extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
139
140/* Free current thread data structures etc. */
141static inline void exit_thread(void)
142{
143}
144
145/* Return saved (kernel) PC of a blocked thread. */
146# define thread_saved_pc(tsk) \
147 ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
148
149unsigned long get_wchan(struct task_struct *p);
150
151/* The size allocated for kernel stacks. This _must_ be a power of two! */
152# define KERNEL_STACK_SIZE 0x2000
153
154/* Return some info about the user process TASK. */
155# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
156# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
157
158# define task_pt_regs_plus_args(tsk) \
159 (((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE)
160
161# define task_sp(task) (task_regs(task)->r1)
162# define task_pc(task) (task_regs(task)->pc)
163/* Grotty old names for some. */
164# define KSTK_EIP(task) (task_pc(task))
165# define KSTK_ESP(task) (task_sp(task))
166
167/* FIXME */
168# define deactivate_mm(tsk, mm) do { } while (0)
169
170# define STACK_TOP TASK_SIZE
171# define STACK_TOP_MAX STACK_TOP
172
173# endif /* __ASSEMBLY__ */
174# endif /* CONFIG_MMU */
88#endif /* _ASM_MICROBLAZE_PROCESSOR_H */ 175#endif /* _ASM_MICROBLAZE_PROCESSOR_H */
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h
index 55015bce5e47..a917dc517736 100644
--- a/arch/microblaze/include/asm/ptrace.h
+++ b/arch/microblaze/include/asm/ptrace.h
@@ -10,7 +10,6 @@
10#define _ASM_MICROBLAZE_PTRACE_H 10#define _ASM_MICROBLAZE_PTRACE_H
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#include <linux/types.h>
14 13
15typedef unsigned long microblaze_reg_t; 14typedef unsigned long microblaze_reg_t;
16 15
diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h
index 834142d9356f..68c3afb73877 100644
--- a/arch/microblaze/include/asm/registers.h
+++ b/arch/microblaze/include/asm/registers.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008 Michal Simek 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -30,4 +30,21 @@
30#define FSR_UF (1<<1) /* Underflow */ 30#define FSR_UF (1<<1) /* Underflow */
31#define FSR_DO (1<<0) /* Denormalized operand error */ 31#define FSR_DO (1<<0) /* Denormalized operand error */
32 32
33# ifdef CONFIG_MMU
34/* Machine State Register (MSR) Fields */
35# define MSR_UM (1<<11) /* User Mode */
36# define MSR_UMS (1<<12) /* User Mode Save */
37# define MSR_VM (1<<13) /* Virtual Mode */
38# define MSR_VMS (1<<14) /* Virtual Mode Save */
39
40# define MSR_KERNEL (MSR_EE | MSR_VM)
41/* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */
42# define MSR_KERNEL_VMS (MSR_EE | MSR_VMS)
43/* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */
44
45/* Exception State Register (ESR) Fields */
46# define ESR_DIZ (1<<11) /* Zone Protection */
47# define ESR_S (1<<10) /* Store instruction */
48
49# endif /* CONFIG_MMU */
33#endif /* _ASM_MICROBLAZE_REGISTERS_H */ 50#endif /* _ASM_MICROBLAZE_REGISTERS_H */
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 8434a43e5421..4487e150b455 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,7 @@
14# ifndef __ASSEMBLY__ 16# ifndef __ASSEMBLY__
15extern char _ssbss[], _esbss[]; 17extern char _ssbss[], _esbss[];
16extern unsigned long __ivt_start[], __ivt_end[]; 18extern unsigned long __ivt_start[], __ivt_end[];
19extern char _etext[], _stext[];
17 20
18# ifdef CONFIG_MTD_UCLINUX 21# ifdef CONFIG_MTD_UCLINUX
19extern char *_ebss; 22extern char *_ebss;
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
index 7f5dcc56eea1..0e7102c3fb11 100644
--- a/arch/microblaze/include/asm/segment.h
+++ b/arch/microblaze/include/asm/segment.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008 Michal Simek 2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008 PetaLogix 3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -11,7 +11,7 @@
11#ifndef _ASM_MICROBLAZE_SEGMENT_H 11#ifndef _ASM_MICROBLAZE_SEGMENT_H
12#define _ASM_MICROBLAZE_SEGMENT_H 12#define _ASM_MICROBLAZE_SEGMENT_H
13 13
14#ifndef __ASSEMBLY__ 14# ifndef __ASSEMBLY__
15 15
16typedef struct { 16typedef struct {
17 unsigned long seg; 17 unsigned long seg;
@@ -29,15 +29,21 @@ typedef struct {
29 * 29 *
30 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal. 30 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
31 */ 31 */
32# define KERNEL_DS ((mm_segment_t){0}) 32# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
33
34# ifndef CONFIG_MMU
35# define KERNEL_DS MAKE_MM_SEG(0)
33# define USER_DS KERNEL_DS 36# define USER_DS KERNEL_DS
37# else
38# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
39# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
40# endif
34 41
35# define get_ds() (KERNEL_DS) 42# define get_ds() (KERNEL_DS)
36# define get_fs() (current_thread_info()->addr_limit) 43# define get_fs() (current_thread_info()->addr_limit)
37# define set_fs(x) \ 44# define set_fs(val) (current_thread_info()->addr_limit = (val))
38 do { current_thread_info()->addr_limit = (x); } while (0)
39 45
40# define segment_eq(a, b) ((a).seg == (b).seg) 46# define segment_eq(a, b) ((a).seg == (b).seg)
41 47
42# endif /* __ASSEMBLY__ */ 48# endif /* __ASSEMBLY__ */
43#endif /* _ASM_MICROBLAZE_SEGMENT_H */ 49#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 9b98e8e6abae..27f8dafd8c34 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
3 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
4 * 5 *
5 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -18,7 +19,6 @@
18extern unsigned int boot_cpuid; /* move to smp.h */ 19extern unsigned int boot_cpuid; /* move to smp.h */
19 20
20extern char cmd_line[COMMAND_LINE_SIZE]; 21extern char cmd_line[COMMAND_LINE_SIZE];
21# endif/* __KERNEL__ */
22 22
23void early_printk(const char *fmt, ...); 23void early_printk(const char *fmt, ...);
24 24
@@ -30,6 +30,11 @@ void setup_heartbeat(void);
30 30
31unsigned long long sched_clock(void); 31unsigned long long sched_clock(void);
32 32
33# ifdef CONFIG_MMU
34extern void mmu_reset(void);
35extern void early_console_reg_tlb_alloc(unsigned int addr);
36# endif /* CONFIG_MMU */
37
33void time_init(void); 38void time_init(void);
34void init_IRQ(void); 39void init_IRQ(void);
35void machine_early_init(const char *cmdline, unsigned int ram, 40void machine_early_init(const char *cmdline, unsigned int ram,
@@ -40,5 +45,6 @@ void machine_shutdown(void);
40void machine_halt(void); 45void machine_halt(void);
41void machine_power_off(void); 46void machine_power_off(void);
42 47
48# endif/* __KERNEL__ */
43# endif /* __ASSEMBLY__ */ 49# endif /* __ASSEMBLY__ */
44#endif /* _ASM_MICROBLAZE_SETUP_H */ 50#endif /* _ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/asm/stat.h
index 5f18b8aed220..a15f77520bfd 100644
--- a/arch/microblaze/include/asm/stat.h
+++ b/arch/microblaze/include/asm/stat.h
@@ -16,58 +16,53 @@
16 16
17#include <linux/posix_types.h> 17#include <linux/posix_types.h>
18 18
19#define STAT_HAVE_NSEC 1
20
19struct stat { 21struct stat {
20 unsigned int st_dev; 22 unsigned long st_dev;
21 unsigned long st_ino; 23 unsigned long st_ino;
22 unsigned int st_mode; 24 unsigned int st_mode;
23 unsigned int st_nlink; 25 unsigned int st_nlink;
24 unsigned int st_uid; 26 unsigned int st_uid;
25 unsigned int st_gid; 27 unsigned int st_gid;
26 unsigned int st_rdev; 28 unsigned long st_rdev;
27 unsigned long st_size; 29 unsigned long __pad1;
28 unsigned long st_blksize; 30 long st_size;
29 unsigned long st_blocks; 31 int st_blksize;
30 unsigned long st_atime; 32 int __pad2;
31 unsigned long __unused1; /* unsigned long st_atime_nsec */ 33 long st_blocks;
32 unsigned long st_mtime; 34 int st_atime;
33 unsigned long __unused2; /* unsigned long st_mtime_nsec */ 35 unsigned int st_atime_nsec;
34 unsigned long st_ctime; 36 int st_mtime;
35 unsigned long __unused3; /* unsigned long st_ctime_nsec */ 37 unsigned int st_mtime_nsec;
38 int st_ctime;
39 unsigned int st_ctime_nsec;
36 unsigned long __unused4; 40 unsigned long __unused4;
37 unsigned long __unused5; 41 unsigned long __unused5;
38}; 42};
39 43
40struct stat64 { 44struct stat64 {
41 unsigned long long st_dev; 45 unsigned long long st_dev; /* Device. */
42 unsigned long __unused1; 46 unsigned long long st_ino; /* File serial number. */
43 47 unsigned int st_mode; /* File mode. */
44 unsigned long long st_ino; 48 unsigned int st_nlink; /* Link count. */
45 49 unsigned int st_uid; /* User ID of the file's owner. */
46 unsigned int st_mode; 50 unsigned int st_gid; /* Group ID of the file's group. */
47 unsigned int st_nlink; 51 unsigned long long st_rdev; /* Device number, if device. */
48 52 unsigned long long __pad1;
49 unsigned int st_uid; 53 long long st_size; /* Size of file, in bytes. */
50 unsigned int st_gid; 54 int st_blksize; /* Optimal block size for I/O. */
51 55 int __pad2;
52 unsigned long long st_rdev; 56 long long st_blocks; /* Number 512-byte blocks allocated. */
53 unsigned long __unused3; 57 int st_atime; /* Time of last access. */
54 58 unsigned int st_atime_nsec;
55 long long st_size; 59 int st_mtime; /* Time of last modification. */
56 unsigned long st_blksize; 60 unsigned int st_mtime_nsec;
57 61 int st_ctime; /* Time of last status change. */
58 unsigned long st_blocks; /* No. of 512-byte blocks allocated */ 62 unsigned int st_ctime_nsec;
59 unsigned long __unused4; /* future possible st_blocks high bits */ 63 unsigned int __unused4;
60 64 unsigned int __unused5;
61 unsigned long st_atime;
62 unsigned long st_atime_nsec;
63
64 unsigned long st_mtime;
65 unsigned long st_mtime_nsec;
66
67 unsigned long st_ctime;
68 unsigned long st_ctime_nsec;
69
70 unsigned long __unused8;
71}; 65};
72 66
73#endif /* _ASM_MICROBLAZE_STAT_H */ 67#endif /* _ASM_MICROBLAZE_STAT_H */
68
diff --git a/arch/microblaze/include/asm/string.h b/arch/microblaze/include/asm/string.h
index f7728c90fc18..aec2f59298b8 100644
--- a/arch/microblaze/include/asm/string.h
+++ b/arch/microblaze/include/asm/string.h
@@ -9,7 +9,7 @@
9#ifndef _ASM_MICROBLAZE_STRING_H 9#ifndef _ASM_MICROBLAZE_STRING_H
10#define _ASM_MICROBLAZE_STRING_H 10#define _ASM_MICROBLAZE_STRING_H
11 11
12#ifndef __KERNEL__ 12#ifdef __KERNEL__
13 13
14#define __HAVE_ARCH_MEMSET 14#define __HAVE_ARCH_MEMSET
15#define __HAVE_ARCH_MEMCPY 15#define __HAVE_ARCH_MEMCPY
diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h
index 9cb4ff0edeb2..ddea9eb31f8d 100644
--- a/arch/microblaze/include/asm/syscalls.h
+++ b/arch/microblaze/include/asm/syscalls.h
@@ -34,6 +34,9 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
34asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act, 34asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act,
35 struct old_sigaction *oact); 35 struct old_sigaction *oact);
36 36
37asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
38 struct sigaction __user *oact, size_t sigsetsize);
39
37asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 40asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
38 struct pt_regs *regs); 41 struct pt_regs *regs);
39 42
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 4c3943e3f403..7fac44498445 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -122,6 +122,8 @@ static inline struct thread_info *current_thread_info(void)
122#define TIF_SINGLESTEP 4 122#define TIF_SINGLESTEP 4
123#define TIF_IRET 5 /* return with iret */ 123#define TIF_IRET 5 /* return with iret */
124#define TIF_MEMDIE 6 124#define TIF_MEMDIE 6
125#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
126#define TIF_SECCOMP 10 /* secure computing */
125#define TIF_FREEZE 14 /* Freezing for suspend */ 127#define TIF_FREEZE 14 /* Freezing for suspend */
126 128
127/* FIXME change in entry.S */ 129/* FIXME change in entry.S */
@@ -138,10 +140,17 @@ static inline struct thread_info *current_thread_info(void)
138#define _TIF_IRET (1<<TIF_IRET) 140#define _TIF_IRET (1<<TIF_IRET)
139#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 141#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
140#define _TIF_FREEZE (1<<TIF_FREEZE) 142#define _TIF_FREEZE (1<<TIF_FREEZE)
143#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
144#define _TIF_SECCOMP (1 << TIF_SECCOMP)
141#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE) 145#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE)
142 146
147/* work to do in syscall trace */
148#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
149 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
150
143/* work to do on interrupt/exception return */ 151/* work to do on interrupt/exception return */
144#define _TIF_WORK_MASK 0x0000FFFE 152#define _TIF_WORK_MASK 0x0000FFFE
153
145/* work to do on any return to u-space */ 154/* work to do on any return to u-space */
146#define _TIF_ALLWORK_MASK 0x0000FFFF 155#define _TIF_ALLWORK_MASK 0x0000FFFF
147 156
@@ -154,6 +163,17 @@ static inline struct thread_info *current_thread_info(void)
154 */ 163 */
155/* FPU was used by this task this quantum (SMP) */ 164/* FPU was used by this task this quantum (SMP) */
156#define TS_USEDFPU 0x0001 165#define TS_USEDFPU 0x0001
166#define TS_RESTORE_SIGMASK 0x0002
167
168#ifndef __ASSEMBLY__
169#define HAVE_SET_RESTORE_SIGMASK 1
170static inline void set_restore_sigmask(void)
171{
172 struct thread_info *ti = current_thread_info();
173 ti->status |= TS_RESTORE_SIGMASK;
174 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
175}
176#endif
157 177
158#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
159#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ 179#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index d1dfe3791127..c472d2801132 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -13,4 +15,10 @@
13 15
14#include <asm-generic/tlb.h> 16#include <asm-generic/tlb.h>
15 17
18#ifdef CONFIG_MMU
19#define tlb_start_vma(tlb, vma) do { } while (0)
20#define tlb_end_vma(tlb, vma) do { } while (0)
21#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
22#endif
23
16#endif /* _ASM_MICROBLAZE_TLB_H */ 24#endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index d7fe7629001b..eb31a0e8a772 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,50 @@
9#ifndef _ASM_MICROBLAZE_TLBFLUSH_H 11#ifndef _ASM_MICROBLAZE_TLBFLUSH_H
10#define _ASM_MICROBLAZE_TLBFLUSH_H 12#define _ASM_MICROBLAZE_TLBFLUSH_H
11 13
14#ifdef CONFIG_MMU
15
16#include <linux/sched.h>
17#include <linux/threads.h>
18#include <asm/processor.h> /* For TASK_SIZE */
19#include <asm/mmu.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22
23extern void _tlbie(unsigned long address);
24extern void _tlbia(void);
25
26#define __tlbia() _tlbia()
27
28static inline void local_flush_tlb_all(void)
29 { __tlbia(); }
30static inline void local_flush_tlb_mm(struct mm_struct *mm)
31 { __tlbia(); }
32static inline void local_flush_tlb_page(struct vm_area_struct *vma,
33 unsigned long vmaddr)
34 { _tlbie(vmaddr); }
35static inline void local_flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end)
37 { __tlbia(); }
38
39#define flush_tlb_kernel_range(start, end) do { } while (0)
40
41#define update_mmu_cache(vma, addr, pte) do { } while (0)
42
43#define flush_tlb_all local_flush_tlb_all
44#define flush_tlb_mm local_flush_tlb_mm
45#define flush_tlb_page local_flush_tlb_page
46#define flush_tlb_range local_flush_tlb_range
47
48/*
49 * This is called in munmap when we have freed up some page-table
50 * pages. We don't need to do anything here, there's nothing special
51 * about our page-table pages. -- paulus
52 */
53static inline void flush_tlb_pgtables(struct mm_struct *mm,
54 unsigned long start, unsigned long end) { }
55
56#else /* CONFIG_MMU */
57
12#define flush_tlb() BUG() 58#define flush_tlb() BUG()
13#define flush_tlb_all() BUG() 59#define flush_tlb_all() BUG()
14#define flush_tlb_mm(mm) BUG() 60#define flush_tlb_mm(mm) BUG()
@@ -17,4 +63,6 @@
17#define flush_tlb_pgtables(mm, start, end) BUG() 63#define flush_tlb_pgtables(mm, start, end) BUG()
18#define flush_tlb_kernel_range(start, end) BUG() 64#define flush_tlb_kernel_range(start, end) BUG()
19 65
66#endif /* CONFIG_MMU */
67
20#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */ 68#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5a3ffc308e12..65adad61e7e9 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -26,6 +28,10 @@
26#define VERIFY_READ 0 28#define VERIFY_READ 0
27#define VERIFY_WRITE 1 29#define VERIFY_WRITE 1
28 30
31#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
32
33#ifndef CONFIG_MMU
34
29extern int ___range_ok(unsigned long addr, unsigned long size); 35extern int ___range_ok(unsigned long addr, unsigned long size);
30 36
31#define __range_ok(addr, size) \ 37#define __range_ok(addr, size) \
@@ -34,68 +40,68 @@ extern int ___range_ok(unsigned long addr, unsigned long size);
34#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 40#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
35#define __access_ok(add, size) (__range_ok((addr), (size)) == 0) 41#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
36 42
37extern inline int bad_user_access_length(void) 43/* Undefined function to trigger linker error */
38{ 44extern int bad_user_access_length(void);
39 return 0; 45
40}
41/* FIXME this is function for optimalization -> memcpy */ 46/* FIXME this is function for optimalization -> memcpy */
42#define __get_user(var, ptr) \ 47#define __get_user(var, ptr) \
43 ({ \ 48({ \
44 int __gu_err = 0; \ 49 int __gu_err = 0; \
45 switch (sizeof(*(ptr))) { \ 50 switch (sizeof(*(ptr))) { \
46 case 1: \ 51 case 1: \
47 case 2: \ 52 case 2: \
48 case 4: \ 53 case 4: \
49 (var) = *(ptr); \ 54 (var) = *(ptr); \
50 break; \ 55 break; \
51 case 8: \ 56 case 8: \
52 memcpy((void *) &(var), (ptr), 8); \ 57 memcpy((void *) &(var), (ptr), 8); \
53 break; \ 58 break; \
54 default: \ 59 default: \
55 (var) = 0; \ 60 (var) = 0; \
56 __gu_err = __get_user_bad(); \ 61 __gu_err = __get_user_bad(); \
57 break; \ 62 break; \
58 } \ 63 } \
59 __gu_err; \ 64 __gu_err; \
60 }) 65})
61 66
62#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) 67#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
63 68
69/* FIXME is not there defined __pu_val */
64#define __put_user(var, ptr) \ 70#define __put_user(var, ptr) \
65 ({ \ 71({ \
66 int __pu_err = 0; \ 72 int __pu_err = 0; \
67 switch (sizeof(*(ptr))) { \ 73 switch (sizeof(*(ptr))) { \
68 case 1: \ 74 case 1: \
69 case 2: \ 75 case 2: \
70 case 4: \ 76 case 4: \
71 *(ptr) = (var); \ 77 *(ptr) = (var); \
72 break; \ 78 break; \
73 case 8: { \ 79 case 8: { \
74 typeof(*(ptr)) __pu_val = var; \ 80 typeof(*(ptr)) __pu_val = (var); \
75 memcpy(ptr, &__pu_val, sizeof(__pu_val));\ 81 memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
76 } \ 82 } \
77 break; \ 83 break; \
78 default: \ 84 default: \
79 __pu_err = __put_user_bad(); \ 85 __pu_err = __put_user_bad(); \
80 break; \ 86 break; \
81 } \ 87 } \
82 __pu_err; \ 88 __pu_err; \
83 }) 89})
84 90
85#define __put_user_bad() (bad_user_access_length(), (-EFAULT)) 91#define __put_user_bad() (bad_user_access_length(), (-EFAULT))
86 92
87#define put_user(x, ptr) __put_user(x, ptr) 93#define put_user(x, ptr) __put_user((x), (ptr))
88#define get_user(x, ptr) __get_user(x, ptr) 94#define get_user(x, ptr) __get_user((x), (ptr))
89 95
90#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) 96#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
91#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) 97#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
92 98
93#define __copy_to_user(to, from, n) (copy_to_user(to, from, n)) 99#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
94#define __copy_from_user(to, from, n) (copy_from_user(to, from, n)) 100#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
95#define __copy_to_user_inatomic(to, from, n) (__copy_to_user(to, from, n)) 101#define __copy_to_user_inatomic(to, from, n) \
96#define __copy_from_user_inatomic(to, from, n) (__copy_from_user(to, from, n)) 102 (__copy_to_user((to), (from), (n)))
97 103#define __copy_from_user_inatomic(to, from, n) \
98#define __clear_user(addr, n) (memset((void *)addr, 0, n), 0) 104 (__copy_from_user((to), (from), (n)))
99 105
100static inline unsigned long clear_user(void *addr, unsigned long size) 106static inline unsigned long clear_user(void *addr, unsigned long size)
101{ 107{
@@ -104,13 +110,200 @@ static inline unsigned long clear_user(void *addr, unsigned long size)
104 return size; 110 return size;
105} 111}
106 112
107/* Returns 0 if exception not found and fixup otherwise. */ 113/* Returns 0 if exception not found and fixup otherwise. */
108extern unsigned long search_exception_table(unsigned long); 114extern unsigned long search_exception_table(unsigned long);
109 115
116extern long strncpy_from_user(char *dst, const char *src, long count);
117extern long strnlen_user(const char *src, long count);
118
119#else /* CONFIG_MMU */
120
121/*
122 * Address is valid if:
123 * - "addr", "addr + size" and "size" are all below the limit
124 */
125#define access_ok(type, addr, size) \
126 (get_fs().seg > (((unsigned long)(addr)) | \
127 (size) | ((unsigned long)(addr) + (size))))
128
129/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */
131
132/*
133 * All the __XXX versions macros/functions below do not perform
134 * access checking. It is assumed that the necessary checks have been
135 * already performed before the finction (macro) is called.
136 */
137
138#define get_user(x, ptr) \
139({ \
140 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
141 ? __get_user((x), (ptr)) : -EFAULT; \
142})
143
144#define put_user(x, ptr) \
145({ \
146 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
147 ? __put_user((x), (ptr)) : -EFAULT; \
148})
149
150#define __get_user(x, ptr) \
151({ \
152 unsigned long __gu_val; \
153 /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
154 long __gu_err; \
155 switch (sizeof(*(ptr))) { \
156 case 1: \
157 __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
158 break; \
159 case 2: \
160 __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
161 break; \
162 case 4: \
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \
165 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \
167 } \
168 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \
170})
171
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
173({ \
174 __asm__ __volatile__ ( \
175 "1:" insn " %1, %2, r0; \
176 addk %0, r0, r0; \
177 2: \
178 .section .fixup,\"ax\"; \
179 3: brid 2b; \
180 addik %0, r0, %3; \
181 .previous; \
182 .section __ex_table,\"a\"; \
183 .word 1b,3b; \
184 .previous;" \
185 : "=r"(__gu_err), "=r"(__gu_val) \
186 : "r"(__gu_ptr), "i"(-EFAULT) \
187 ); \
188})
189
190#define __put_user(x, ptr) \
191({ \
192 __typeof__(*(ptr)) __gu_val = x; \
193 long __gu_err = 0; \
194 switch (sizeof(__gu_val)) { \
195 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \
198 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \
201 case 4: \
202 __put_user_asm("sw", (ptr), __gu_val, __gu_err); \
203 break; \
204 case 8: \
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \
207 default: \
208 __gu_err = -EINVAL; \
209 } \
210 __gu_err; \
211})
212
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
235({ \
236 __asm__ __volatile__ ( \
237 "1:" insn " %1, %2, r0; \
238 addk %0, r0, r0; \
239 2: \
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251
252/*
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
254 */
255static inline int clear_user(char *to, int size)
256{
257 if (size && access_ok(VERIFY_WRITE, to, size)) {
258 __asm__ __volatile__ (" \
259 1: \
260 sb r0, %2, r0; \
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273}
274
275extern unsigned long __copy_tofrom_user(void __user *to,
276 const void __user *from, unsigned long size);
277
278#define copy_to_user(to, from, n) \
279 (access_ok(VERIFY_WRITE, (to), (n)) ? \
280 __copy_tofrom_user((void __user *)(to), \
281 (__force const void __user *)(from), (n)) \
282 : -EFAULT)
283
284#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
285#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
286
287#define copy_from_user(to, from, n) \
288 (access_ok(VERIFY_READ, (from), (n)) ? \
289 __copy_tofrom_user((__force void __user *)(to), \
290 (void __user *)(from), (n)) \
291 : -EFAULT)
292
293#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
294#define __copy_from_user_inatomic(to, from, n) \
295 copy_from_user((to), (from), (n))
296
297extern int __strncpy_user(char *to, const char __user *from, int len);
298extern int __strnlen_user(const char __user *sstr, int len);
299
300#define strncpy_from_user(to, from, len) \
301 (access_ok(VERIFY_READ, from, 1) ? \
302 __strncpy_user(to, from, len) : -EFAULT)
303#define strnlen_user(str, len) \
304 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
110 305
111extern long strncpy_from_user(char *dst, const char __user *src, long count); 306#endif /* CONFIG_MMU */
112extern long strnlen_user(const char __user *src, long count);
113extern long __strncpy_from_user(char *dst, const char __user *src, long count);
114 307
115/* 308/*
116 * The exception table consists of pairs of addresses: the first is the 309 * The exception table consists of pairs of addresses: the first is the
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
index 9d66b640c910..3658d91ac0fb 100644
--- a/arch/microblaze/include/asm/unaligned.h
+++ b/arch/microblaze/include/asm/unaligned.h
@@ -12,7 +12,8 @@
12 12
13# ifdef __KERNEL__ 13# ifdef __KERNEL__
14 14
15# include <linux/unaligned/access_ok.h> 15# include <linux/unaligned/be_struct.h>
16# include <linux/unaligned/le_byteshift.h>
16# include <linux/unaligned/generic.h> 17# include <linux/unaligned/generic.h>
17 18
18# define get_unaligned __get_unaligned_be 19# define get_unaligned __get_unaligned_be
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index da94bec4ecba..f4a5e19a20eb 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
15obj-$(CONFIG_SELFMOD) += selfmod.o 15obj-$(CONFIG_SELFMOD) += selfmod.o
16obj-$(CONFIG_HEART_BEAT) += heartbeat.o 16obj-$(CONFIG_HEART_BEAT) += heartbeat.o
17obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o 17obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
18obj-$(CONFIG_MMU) += misc.o
18 19
19obj-y += entry$(MMUEXT).o 20obj-y += entry$(MMUEXT).o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index aabd9e9423a6..7bc7b68f97db 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
2 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
3 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
4 * 5 *
@@ -68,16 +69,26 @@ int main(int argc, char *argv[])
68 69
69 /* struct task_struct */ 70 /* struct task_struct */
70 DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); 71 DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
72#ifdef CONFIG_MMU
73 DEFINE(TASK_STATE, offsetof(struct task_struct, state));
74 DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
75 DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
76 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
77 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
78 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
79 DEFINE(TASK_PID, offsetof(struct task_struct, pid));
80 DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
81 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
82 BLANK();
83
84 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
85 BLANK();
86#endif
71 87
72 /* struct thread_info */ 88 /* struct thread_info */
73 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 89 DEFINE(TI_TASK, offsetof(struct thread_info, task));
74 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
75 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
76 DEFINE(TI_STATUS, offsetof(struct thread_info, status));
77 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
78 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
79 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
80 DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
81 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
82 BLANK(); 93 BLANK();
83 94
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 4b0f0fdb9ca0..7de84923ba07 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
87 base_addr = early_uartlite_console(); 87 base_addr = early_uartlite_console();
88 if (base_addr) { 88 if (base_addr) {
89 early_console_initialized = 1; 89 early_console_initialized = 1;
90#ifdef CONFIG_MMU
91 early_console_reg_tlb_alloc(base_addr);
92#endif
90 early_printk("early_printk_console is enabled at 0x%08x\n", 93 early_printk("early_printk_console is enabled at 0x%08x\n",
91 base_addr); 94 base_addr);
92 95
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index f24b1268baaf..1fce6b803f54 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -10,7 +10,7 @@
10 10
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <asm/thread_info.h> 12#include <asm/thread_info.h>
13#include <asm/errno.h> 13#include <linux/errno.h>
14#include <asm/entry.h> 14#include <asm/entry.h>
15#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/registers.h> 16#include <asm/registers.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
new file mode 100644
index 000000000000..91a0e7b185dd
--- /dev/null
+++ b/arch/microblaze/kernel/entry.S
@@ -0,0 +1,1116 @@
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34/* The size of a state save frame. */
35#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
36
37/* The offset of the struct pt_regs in a `state save frame' on the stack. */
38#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
39
40#define C_ENTRY(name) .globl name; .align 4; name
41
42/*
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using microblaze version that
45 * allows msr ops to write to BIP
46 */
47#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
48 .macro clear_bip
49 msrclr r11, MSR_BIP
50 nop
51 .endm
52
53 .macro set_bip
54 msrset r11, MSR_BIP
55 nop
56 .endm
57
58 .macro clear_eip
59 msrclr r11, MSR_EIP
60 nop
61 .endm
62
63 .macro set_ee
64 msrset r11, MSR_EE
65 nop
66 .endm
67
68 .macro disable_irq
69 msrclr r11, MSR_IE
70 nop
71 .endm
72
73 .macro enable_irq
74 msrset r11, MSR_IE
75 nop
76 .endm
77
78 .macro set_ums
79 msrset r11, MSR_UMS
80 nop
81 msrclr r11, MSR_VMS
82 nop
83 .endm
84
85 .macro set_vms
86 msrclr r11, MSR_UMS
87 nop
88 msrset r11, MSR_VMS
89 nop
90 .endm
91
92 .macro clear_vms_ums
93 msrclr r11, MSR_VMS
94 nop
95 msrclr r11, MSR_UMS
96 nop
97 .endm
98#else
99 .macro clear_bip
100 mfs r11, rmsr
101 nop
102 andi r11, r11, ~MSR_BIP
103 mts rmsr, r11
104 nop
105 .endm
106
107 .macro set_bip
108 mfs r11, rmsr
109 nop
110 ori r11, r11, MSR_BIP
111 mts rmsr, r11
112 nop
113 .endm
114
115 .macro clear_eip
116 mfs r11, rmsr
117 nop
118 andi r11, r11, ~MSR_EIP
119 mts rmsr, r11
120 nop
121 .endm
122
123 .macro set_ee
124 mfs r11, rmsr
125 nop
126 ori r11, r11, MSR_EE
127 mts rmsr, r11
128 nop
129 .endm
130
131 .macro disable_irq
132 mfs r11, rmsr
133 nop
134 andi r11, r11, ~MSR_IE
135 mts rmsr, r11
136 nop
137 .endm
138
139 .macro enable_irq
140 mfs r11, rmsr
141 nop
142 ori r11, r11, MSR_IE
143 mts rmsr, r11
144 nop
145 .endm
146
147 .macro set_ums
148 mfs r11, rmsr
149 nop
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
152 mts rmsr, r11
153 nop
154 .endm
155
156 .macro set_vms
157 mfs r11, rmsr
158 nop
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
161 mts rmsr, r11
162 nop
163 .endm
164
165 .macro clear_vms_ums
166 mfs r11, rmsr
167 nop
168 andni r11, r11, (MSR_VMS|MSR_UMS)
169 mts rmsr,r11
170 nop
171 .endm
172#endif
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
179/* turn on virtual protected mode save */
180#define VM_ON \
181 set_ums; \
182 rted r0, 2f; \
1832: nop;
184
185/* turn off virtual protected mode save and user mode save*/
186#define VM_OFF \
187 clear_vms_ums; \
188 rted r0, TOPHYS(1f); \
1891: nop;
190
191#define SAVE_REGS \
192 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
193 swi r5, r1, PTO+PT_R5; \
194 swi r6, r1, PTO+PT_R6; \
195 swi r7, r1, PTO+PT_R7; \
196 swi r8, r1, PTO+PT_R8; \
197 swi r9, r1, PTO+PT_R9; \
198 swi r10, r1, PTO+PT_R10; \
199 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
200 swi r12, r1, PTO+PT_R12; \
201 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
202 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
203 swi r15, r1, PTO+PT_R15; /* Save LP */ \
204 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
205 swi r19, r1, PTO+PT_R19; \
206 swi r20, r1, PTO+PT_R20; \
207 swi r21, r1, PTO+PT_R21; \
208 swi r22, r1, PTO+PT_R22; \
209 swi r23, r1, PTO+PT_R23; \
210 swi r24, r1, PTO+PT_R24; \
211 swi r25, r1, PTO+PT_R25; \
212 swi r26, r1, PTO+PT_R26; \
213 swi r27, r1, PTO+PT_R27; \
214 swi r28, r1, PTO+PT_R28; \
215 swi r29, r1, PTO+PT_R29; \
216 swi r30, r1, PTO+PT_R30; \
217 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
218 mfs r11, rmsr; /* save MSR */ \
219 nop; \
220 swi r11, r1, PTO+PT_MSR;
221
222#define RESTORE_REGS \
223 lwi r11, r1, PTO+PT_MSR; \
224 mts rmsr , r11; \
225 nop; \
226 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
227 lwi r5, r1, PTO+PT_R5; \
228 lwi r6, r1, PTO+PT_R6; \
229 lwi r7, r1, PTO+PT_R7; \
230 lwi r8, r1, PTO+PT_R8; \
231 lwi r9, r1, PTO+PT_R9; \
232 lwi r10, r1, PTO+PT_R10; \
233 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
234 lwi r12, r1, PTO+PT_R12; \
235 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
236 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
237 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
238 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
239 lwi r19, r1, PTO+PT_R19; \
240 lwi r20, r1, PTO+PT_R20; \
241 lwi r21, r1, PTO+PT_R21; \
242 lwi r22, r1, PTO+PT_R22; \
243 lwi r23, r1, PTO+PT_R23; \
244 lwi r24, r1, PTO+PT_R24; \
245 lwi r25, r1, PTO+PT_R25; \
246 lwi r26, r1, PTO+PT_R26; \
247 lwi r27, r1, PTO+PT_R27; \
248 lwi r28, r1, PTO+PT_R28; \
249 lwi r29, r1, PTO+PT_R29; \
250 lwi r30, r1, PTO+PT_R30; \
251 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
252
253.text
254
255/*
256 * User trap.
257 *
258 * System calls are handled here.
259 *
260 * Syscall protocol:
261 * Syscall number in r12, args in r5-r10
262 * Return value in r3
263 *
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
266 */
267C_ENTRY(_user_exception):
268 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
269 addi r14, r14, 4 /* return address is 4 byte after call */
270 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
271
272 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
273 beqi r11, 1f; /* Jump ahead if coming from user */
274/* Kernel-mode state save. */
275 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
276 tophys(r1,r11);
277 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
278 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
279
280 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
281 SAVE_REGS
282
283 addi r11, r0, 1; /* Was in kernel-mode. */
284 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
285 brid 2f;
286 nop; /* Fill delay slot */
287
288/* User-mode state save. */
2891:
290 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
291 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
292 tophys(r1,r1);
293 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
294/* calculate kernel stack pointer from task struct 8k */
295 addik r1, r1, THREAD_SIZE;
296 tophys(r1,r1);
297
298 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
299 SAVE_REGS
300
301 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
302 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
303 swi r11, r1, PTO+PT_R1; /* Store user SP. */
304 addi r11, r0, 1;
305 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3062: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
307 /* Save away the syscall number. */
308 swi r12, r1, PTO+PT_R0;
309 tovirt(r1,r1)
310
311 la r15, r0, ret_from_trap-8
312/* where the trap should return need -8 to adjust for rtsd r15, 8*/
313/* Jump to the appropriate function for the system call number in r12
314 * (r12 is not preserved), or return an error if r12 is not valid. The LP
315 * register should point to the location where
316 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
317 /* See if the system call number is valid. */
318 addi r11, r12, -__NR_syscalls;
319 bgei r11,1f;
320 /* Figure out which function to use for this system call. */
321 /* Note Microblaze barrel shift is optional, so don't rely on it */
322 add r12, r12, r12; /* convert num -> ptr */
323 add r12, r12, r12;
324
325 /* Trac syscalls and stored them to r0_ram */
326 lwi r3, r12, 0x400 + TOPHYS(r0_ram)
327 addi r3, r3, 1
328 swi r3, r12, 0x400 + TOPHYS(r0_ram)
329
330 lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */
331 /* Make the system call. to r12*/
332 set_vms;
333 rtid r12, 0;
334 nop;
335 /* The syscall number is invalid, return an error. */
3361: VM_ON; /* RETURN() expects virtual mode*/
337 addi r3, r0, -ENOSYS;
338 rtsd r15,8; /* looks like a normal subroutine return */
339 or r0, r0, r0
340
341
342/* Entry point used to return from a syscall/trap. */
343/* We re-enable BIP bit before state restore */
344C_ENTRY(ret_from_trap):
345 set_bip; /* Ints masked for state restore*/
346 lwi r11, r1, PTO+PT_MODE;
347/* See if returning to kernel mode, if so, skip resched &c. */
348 bnei r11, 2f;
349
350 /* We're returning to user mode, so check for various conditions that
351 * trigger rescheduling. */
352 /* Get current task ptr into r11 */
353 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
354 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
355 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
356 andi r11, r11, _TIF_NEED_RESCHED;
357 beqi r11, 5f;
358
359 swi r3, r1, PTO + PT_R3; /* store syscall result */
360 swi r4, r1, PTO + PT_R4;
361 bralid r15, schedule; /* Call scheduler */
362 nop; /* delay slot */
363 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
364 lwi r4, r1, PTO + PT_R4;
365
366 /* Maybe handle a signal */
3675: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
368 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
369 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
370 andi r11, r11, _TIF_SIGPENDING;
371 beqi r11, 1f; /* Signals to handle, handle them */
372
373 swi r3, r1, PTO + PT_R3; /* store syscall result */
374 swi r4, r1, PTO + PT_R4;
375 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
376 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
377 addi r7, r0, 1; /* Arg 3: int in_syscall */
378 bralid r15, do_signal; /* Handle any signals */
379 nop;
380 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
381 lwi r4, r1, PTO + PT_R4;
382
383/* Finally, return to user state. */
3841: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
385 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
386 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
387 VM_OFF;
388 tophys(r1,r1);
389 RESTORE_REGS;
390 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
391 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
392 bri 6f;
393
394/* Return to kernel state. */
3952: VM_OFF;
396 tophys(r1,r1);
397 RESTORE_REGS;
398 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
399 tovirt(r1,r1);
4006:
401TRAP_return: /* Make global symbol for debugging */
402 rtbd r14, 0; /* Instructions to return from an IRQ */
403 nop;
404
405
406/* These syscalls need access to the struct pt_regs on the stack, so we
407 implement them in assembly (they're basically all wrappers anyway). */
408
409C_ENTRY(sys_fork_wrapper):
410 addi r5, r0, SIGCHLD /* Arg 0: flags */
411 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
412 la r7, r1, PTO /* Arg 2: parent context */
413 add r8. r0, r0 /* Arg 3: (unused) */
414 add r9, r0, r0; /* Arg 4: (unused) */
415 add r10, r0, r0; /* Arg 5: (unused) */
416 brid do_fork /* Do real work (tail-call) */
417 nop;
418
419/* This the initial entry point for a new child thread, with an appropriate
420 stack in place that makes it look the the child is in the middle of an
421 syscall. This function is actually `returned to' from switch_thread
422 (copy_thread makes ret_from_fork the return address in each new thread's
423 saved context). */
424C_ENTRY(ret_from_fork):
425 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
426 add r3, r5, r0; /* switch_thread returns the prev task */
427 /* ( in the delay slot ) */
428 add r3, r0, r0; /* Child's fork call should return 0. */
429 brid ret_from_trap; /* Do normal trap return */
430 nop;
431
432C_ENTRY(sys_vfork_wrapper):
433 la r5, r1, PTO
434 brid sys_vfork /* Do real work (tail-call) */
435 nop
436
437C_ENTRY(sys_clone_wrapper):
438 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
439 lwi r6, r1, PTO+PT_R1; /* If so, use paret's stack ptr */
4401: la r7, r1, PTO; /* Arg 2: parent context */
441 add r8, r0, r0; /* Arg 3: (unused) */
442 add r9, r0, r0; /* Arg 4: (unused) */
443 add r10, r0, r0; /* Arg 5: (unused) */
444 brid do_fork /* Do real work (tail-call) */
445 nop;
446
447C_ENTRY(sys_execve_wrapper):
448 la r8, r1, PTO; /* add user context as 4th arg */
449 brid sys_execve; /* Do real work (tail-call).*/
450 nop;
451
452C_ENTRY(sys_sigsuspend_wrapper):
453 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
454 swi r4, r1, PTO+PT_R4;
455 la r6, r1, PTO; /* add user context as 2nd arg */
456 bralid r15, sys_sigsuspend; /* Do real work.*/
457 nop;
458 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
459 lwi r4, r1, PTO+PT_R4;
460 bri ret_from_trap /* fall through will not work here due to align */
461 nop;
462
463C_ENTRY(sys_rt_sigsuspend_wrapper):
464 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
465 swi r4, r1, PTO+PT_R4;
466 la r7, r1, PTO; /* add user context as 3rd arg */
467 brlid r15, sys_rt_sigsuspend; /* Do real work.*/
468 nop;
469 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
470 lwi r4, r1, PTO+PT_R4;
471 bri ret_from_trap /* fall through will not work here due to align */
472 nop;
473
474
475C_ENTRY(sys_sigreturn_wrapper):
476 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
477 swi r4, r1, PTO+PT_R4;
478 la r5, r1, PTO; /* add user context as 1st arg */
479 brlid r15, sys_sigreturn; /* Do real work.*/
480 nop;
481 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
482 lwi r4, r1, PTO+PT_R4;
483 bri ret_from_trap /* fall through will not work here due to align */
484 nop;
485
486C_ENTRY(sys_rt_sigreturn_wrapper):
487 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
488 swi r4, r1, PTO+PT_R4;
489 la r5, r1, PTO; /* add user context as 1st arg */
490 brlid r15, sys_rt_sigreturn /* Do real work */
491 nop;
492 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
493 lwi r4, r1, PTO+PT_R4;
494 bri ret_from_trap /* fall through will not work here due to align */
495 nop;
496
497/*
498 * HW EXCEPTION rutine start
499 */
500
501#define SAVE_STATE \
502 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
503 set_bip; /*equalize initial state for all possible entries*/\
504 clear_eip; \
505 enable_irq; \
506 set_ee; \
507 /* See if already in kernel mode.*/ \
508 lwi r11, r0, TOPHYS(PER_CPU(KM)); \
509 beqi r11, 1f; /* Jump ahead if coming from user */\
510 /* Kernel-mode state save. */ \
511 /* Reload kernel stack-ptr. */ \
512 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
513 tophys(r1,r11); \
514 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
515 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
516 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
517 /* store return registers separately because \
518 * this macros is use for others exceptions */ \
519 swi r3, r1, PTO + PT_R3; \
520 swi r4, r1, PTO + PT_R4; \
521 SAVE_REGS \
522 /* PC, before IRQ/trap - this is one instruction above */ \
523 swi r17, r1, PTO+PT_PC; \
524 \
525 addi r11, r0, 1; /* Was in kernel-mode. */ \
526 swi r11, r1, PTO+PT_MODE; \
527 brid 2f; \
528 nop; /* Fill delay slot */ \
5291: /* User-mode state save. */ \
530 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
531 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
532 tophys(r1,r1); \
533 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
534 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
535 tophys(r1,r1); \
536 \
537 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
538 /* store return registers separately because this macros \
539 * is use for others exceptions */ \
540 swi r3, r1, PTO + PT_R3; \
541 swi r4, r1, PTO + PT_R4; \
542 SAVE_REGS \
543 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
544 swi r17, r1, PTO+PT_PC; \
545 \
546 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
547 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
548 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
549 addi r11, r0, 1; \
550 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5512: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
552 /* Save away the syscall number. */ \
553 swi r0, r1, PTO+PT_R0; \
554 tovirt(r1,r1)
555
556C_ENTRY(full_exception_trap):
557 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
558 /* adjust exception address for privileged instruction
559 * for finding where is it */
560 addik r17, r17, -4
561 SAVE_STATE /* Save registers */
562 /* FIXME this can be store directly in PT_ESR reg.
563 * I tested it but there is a fault */
564 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
565 la r15, r0, ret_from_exc - 8
566 la r5, r1, PTO /* parameter struct pt_regs * regs */
567 mfs r6, resr
568 nop
569 mfs r7, rfsr; /* save FSR */
570 nop
571 la r12, r0, full_exception
572 set_vms;
573 rtbd r12, 0;
574 nop;
575
576/*
577 * Unaligned data trap.
578 *
579 * Unaligned data trap last on 4k page is handled here.
580 *
581 * Trap entered via exception, so EE bit is set, and interrupts
582 * are masked. This is nice, means we don't have to CLI before state save
583 *
584 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
585 */
586C_ENTRY(unaligned_data_trap):
587 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
588 SAVE_STATE /* Save registers.*/
589 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
590 la r15, r0, ret_from_exc-8
591 mfs r3, resr /* ESR */
592 nop
593 mfs r4, rear /* EAR */
594 nop
595 la r7, r1, PTO /* parameter struct pt_regs * regs */
596 la r12, r0, _unaligned_data_exception
597 set_vms;
598 rtbd r12, 0; /* interrupts enabled */
599 nop;
600
601/*
602 * Page fault traps.
603 *
604 * If the real exception handler (from hw_exception_handler.S) didn't find
605 * the mapping for the process, then we're thrown here to handle such situation.
606 *
607 * Trap entered via exceptions, so EE bit is set, and interrupts
608 * are masked. This is nice, means we don't have to CLI before state save
609 *
610 * Build a standard exception frame for TLB Access errors. All TLB exceptions
611 * will bail out to this point if they can't resolve the lightweight TLB fault.
612 *
613 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
614 * void do_page_fault(struct pt_regs *regs,
615 * unsigned long address,
616 * unsigned long error_code)
617 */
618/* data and intruction trap - which is choose is resolved int fault.c */
619C_ENTRY(page_fault_data_trap):
620 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
621 SAVE_STATE /* Save registers.*/
622 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
623 la r15, r0, ret_from_exc-8
624 la r5, r1, PTO /* parameter struct pt_regs * regs */
625 mfs r6, rear /* parameter unsigned long address */
626 nop
627 mfs r7, resr /* parameter unsigned long error_code */
628 nop
629 la r12, r0, do_page_fault
630 set_vms;
631 rtbd r12, 0; /* interrupts enabled */
632 nop;
633
634C_ENTRY(page_fault_instr_trap):
635 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
636 SAVE_STATE /* Save registers.*/
637 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
638 la r15, r0, ret_from_exc-8
639 la r5, r1, PTO /* parameter struct pt_regs * regs */
640 mfs r6, rear /* parameter unsigned long address */
641 nop
642 ori r7, r0, 0 /* parameter unsigned long error_code */
643 la r12, r0, do_page_fault
644 set_vms;
645 rtbd r12, 0; /* interrupts enabled */
646 nop;
647
648/* Entry point used to return from an exception. */
649C_ENTRY(ret_from_exc):
650 set_bip; /* Ints masked for state restore*/
651 lwi r11, r1, PTO+PT_MODE;
652 bnei r11, 2f; /* See if returning to kernel mode, */
653 /* ... if so, skip resched &c. */
654
655 /* We're returning to user mode, so check for various conditions that
656 trigger rescheduling. */
657 /* Get current task ptr into r11 */
658 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
659 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
660 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
661 andi r11, r11, _TIF_NEED_RESCHED;
662 beqi r11, 5f;
663
664/* Call the scheduler before returning from a syscall/trap. */
665 bralid r15, schedule; /* Call scheduler */
666 nop; /* delay slot */
667
668 /* Maybe handle a signal */
6695: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
670 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
671 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
672 andi r11, r11, _TIF_SIGPENDING;
673 beqi r11, 1f; /* Signals to handle, handle them */
674
675 /*
676 * Handle a signal return; Pending signals should be in r18.
677 *
678 * Not all registers are saved by the normal trap/interrupt entry
679 * points (for instance, call-saved registers (because the normal
680 * C-compiler calling sequence in the kernel makes sure they're
681 * preserved), and call-clobbered registers in the case of
682 * traps), but signal handlers may want to examine or change the
683 * complete register state. Here we save anything not saved by
684 * the normal entry sequence, so that it may be safely restored
685 * (in a possibly modified form) after do_signal returns.
686 * store return registers separately because this macros is use
687 * for others exceptions */
688 swi r3, r1, PTO + PT_R3;
689 swi r4, r1, PTO + PT_R4;
690 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
691 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
692 addi r7, r0, 0; /* Arg 3: int in_syscall */
693 bralid r15, do_signal; /* Handle any signals */
694 nop;
695 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
696 lwi r4, r1, PTO+PT_R4;
697
698/* Finally, return to user state. */
6991: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
700 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
701 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
702 VM_OFF;
703 tophys(r1,r1);
704
705 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
706 lwi r4, r1, PTO+PT_R4;
707 RESTORE_REGS;
708 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
709
710 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
711 bri 6f;
712/* Return to kernel state. */
7132: VM_OFF;
714 tophys(r1,r1);
715 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
716 lwi r4, r1, PTO+PT_R4;
717 RESTORE_REGS;
718 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
719
720 tovirt(r1,r1);
7216:
722EXC_return: /* Make global symbol for debugging */
723 rtbd r14, 0; /* Instructions to return from an IRQ */
724 nop;
725
726/*
727 * HW EXCEPTION rutine end
728 */
729
730/*
731 * Hardware maskable interrupts.
732 *
733 * The stack-pointer (r1) should have already been saved to the memory
734 * location PER_CPU(ENTRY_SP).
735 */
736C_ENTRY(_interrupt):
737/* MS: we are in physical address */
738/* Save registers, switch to proper stack, convert SP to virtual.*/
739 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
740 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
741 /* MS: See if already in kernel mode. */
742 lwi r11, r0, TOPHYS(PER_CPU(KM));
743 beqi r11, 1f; /* MS: Jump ahead if coming from user */
744
745/* Kernel-mode state save. */
746 or r11, r1, r0
747 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
748/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
749 swi r11, r1, (PT_R1 - PT_SIZE);
750/* MS: restore r11 because of saving in SAVE_REGS */
751 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
752 /* save registers */
753/* MS: Make room on the stack -> activation record */
754 addik r1, r1, -STATE_SAVE_SIZE;
755/* MS: store return registers separately because
756 * this macros is use for others exceptions */
757 swi r3, r1, PTO + PT_R3;
758 swi r4, r1, PTO + PT_R4;
759 SAVE_REGS
760 /* MS: store mode */
761 addi r11, r0, 1; /* MS: Was in kernel-mode. */
762 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
763 brid 2f;
764 nop; /* MS: Fill delay slot */
765
7661:
767/* User-mode state save. */
768/* MS: restore r11 -> FIXME move before SAVE_REG */
769 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
770 /* MS: get the saved current */
771 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
772 tophys(r1,r1);
773 lwi r1, r1, TS_THREAD_INFO;
774 addik r1, r1, THREAD_SIZE;
775 tophys(r1,r1);
776 /* save registers */
777 addik r1, r1, -STATE_SAVE_SIZE;
778 swi r3, r1, PTO+PT_R3;
779 swi r4, r1, PTO+PT_R4;
780 SAVE_REGS
781 /* calculate mode */
782 swi r0, r1, PTO + PT_MODE;
783 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
784 swi r11, r1, PTO+PT_R1;
785 /* setup kernel mode to KM */
786 addi r11, r0, 1;
787 swi r11, r0, TOPHYS(PER_CPU(KM));
788
7892:
790 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
791 swi r0, r1, PTO + PT_R0;
792 tovirt(r1,r1)
793 la r5, r1, PTO;
794 set_vms;
795 la r11, r0, do_IRQ;
796 la r15, r0, irq_call;
797irq_call:rtbd r11, 0;
798 nop;
799
800/* MS: we are in virtual mode */
801ret_from_irq:
802 lwi r11, r1, PTO + PT_MODE;
803 bnei r11, 2f;
804
805 add r11, r0, CURRENT_TASK;
806 lwi r11, r11, TS_THREAD_INFO;
807 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
808 andi r11, r11, _TIF_NEED_RESCHED;
809 beqi r11, 5f
810 bralid r15, schedule;
811 nop; /* delay slot */
812
813 /* Maybe handle a signal */
8145: add r11, r0, CURRENT_TASK;
815 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
816 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
817 andi r11, r11, _TIF_SIGPENDING;
818 beqid r11, no_intr_resched
819/* Handle a signal return; Pending signals should be in r18. */
820 addi r7, r0, 0; /* Arg 3: int in_syscall */
821 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
822 bralid r15, do_signal; /* Handle any signals */
823 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
824
825/* Finally, return to user state. */
826no_intr_resched:
827 /* Disable interrupts, we are now committed to the state restore */
828 disable_irq
829 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
830 add r11, r0, CURRENT_TASK;
831 swi r11, r0, PER_CPU(CURRENT_SAVE);
832 VM_OFF;
833 tophys(r1,r1);
834 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
835 lwi r4, r1, PTO + PT_R4;
836 RESTORE_REGS
837 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
838 lwi r1, r1, PT_R1 - PT_SIZE;
839 bri 6f;
840/* MS: Return to kernel state. */
8412: VM_OFF /* MS: turn off MMU */
842 tophys(r1,r1)
843 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
844 lwi r4, r1, PTO + PT_R4;
845 RESTORE_REGS
846 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
847 tovirt(r1,r1);
8486:
849IRQ_return: /* MS: Make global symbol for debugging */
850 rtid r14, 0
851 nop
852
853/*
854 * `Debug' trap
855 * We enter dbtrap in "BIP" (breakpoint) mode.
856 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
857 * original dbtrap.
858 * however, wait to save state first
859 */
860C_ENTRY(_debug_exception):
861 /* BIP bit is set on entry, no interrupts can occur */
862 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
863
864 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
865 set_bip; /*equalize initial state for all possible entries*/
866 clear_eip;
867 enable_irq;
868 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
869 beqi r11, 1f; /* Jump ahead if coming from user */
870 /* Kernel-mode state save. */
871 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
872 tophys(r1,r11);
873 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
874 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
875
876 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
877 swi r3, r1, PTO + PT_R3;
878 swi r4, r1, PTO + PT_R4;
879 SAVE_REGS;
880
881 addi r11, r0, 1; /* Was in kernel-mode. */
882 swi r11, r1, PTO + PT_MODE;
883 brid 2f;
884 nop; /* Fill delay slot */
8851: /* User-mode state save. */
886 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
887 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
888 tophys(r1,r1);
889 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
890 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
891 tophys(r1,r1);
892
893 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
894 swi r3, r1, PTO + PT_R3;
895 swi r4, r1, PTO + PT_R4;
896 SAVE_REGS;
897
898 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
899 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
900 swi r11, r1, PTO+PT_R1; /* Store user SP. */
901 addi r11, r0, 1;
902 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9032: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
904 /* Save away the syscall number. */
905 swi r0, r1, PTO+PT_R0;
906 tovirt(r1,r1)
907
908 addi r5, r0, SIGTRAP /* send the trap signal */
909 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
910 addk r7, r0, r0 /* 3rd param zero */
911
912 set_vms;
913 la r11, r0, send_sig;
914 la r15, r0, dbtrap_call;
915dbtrap_call: rtbd r11, 0;
916 nop;
917
918 set_bip; /* Ints masked for state restore*/
919 lwi r11, r1, PTO+PT_MODE;
920 bnei r11, 2f;
921
922 /* Get current task ptr into r11 */
923 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
924 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
925 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
926 andi r11, r11, _TIF_NEED_RESCHED;
927 beqi r11, 5f;
928
929/* Call the scheduler before returning from a syscall/trap. */
930
931 bralid r15, schedule; /* Call scheduler */
932 nop; /* delay slot */
933 /* XXX Is PT_DTRACE handling needed here? */
934 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
935
936 /* Maybe handle a signal */
9375: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
938 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
939 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
940 andi r11, r11, _TIF_SIGPENDING;
941 beqi r11, 1f; /* Signals to handle, handle them */
942
943/* Handle a signal return; Pending signals should be in r18. */
944 /* Not all registers are saved by the normal trap/interrupt entry
945 points (for instance, call-saved registers (because the normal
946 C-compiler calling sequence in the kernel makes sure they're
947 preserved), and call-clobbered registers in the case of
948 traps), but signal handlers may want to examine or change the
949 complete register state. Here we save anything not saved by
950 the normal entry sequence, so that it may be safely restored
951 (in a possibly modified form) after do_signal returns. */
952
953 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
954 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
955 addi r7, r0, 0; /* Arg 3: int in_syscall */
956 bralid r15, do_signal; /* Handle any signals */
957 nop;
958
959
960/* Finally, return to user state. */
9611: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
962 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
963 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
964 VM_OFF;
965 tophys(r1,r1);
966
967 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
968 lwi r4, r1, PTO+PT_R4;
969 RESTORE_REGS
970 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
971
972
973 lwi r1, r1, PT_R1 - PT_SIZE;
974 /* Restore user stack pointer. */
975 bri 6f;
976
977/* Return to kernel state. */
9782: VM_OFF;
979 tophys(r1,r1);
980 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
981 lwi r4, r1, PTO+PT_R4;
982 RESTORE_REGS
983 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
984
985 tovirt(r1,r1);
9866:
987DBTRAP_return: /* Make global symbol for debugging */
988 rtbd r14, 0; /* Instructions to return from an IRQ */
989 nop;
990
991
992
993ENTRY(_switch_to)
994 /* prepare return value */
995 addk r3, r0, r31
996
997 /* save registers in cpu_context */
998 /* use r11 and r12, volatile registers, as temp register */
999 /* give start of cpu_context for previous process */
1000 addik r11, r5, TI_CPU_CONTEXT
1001 swi r1, r11, CC_R1
1002 swi r2, r11, CC_R2
1003 /* skip volatile registers.
1004 * they are saved on stack when we jumped to _switch_to() */
1005 /* dedicated registers */
1006 swi r13, r11, CC_R13
1007 swi r14, r11, CC_R14
1008 swi r15, r11, CC_R15
1009 swi r16, r11, CC_R16
1010 swi r17, r11, CC_R17
1011 swi r18, r11, CC_R18
1012 /* save non-volatile registers */
1013 swi r19, r11, CC_R19
1014 swi r20, r11, CC_R20
1015 swi r21, r11, CC_R21
1016 swi r22, r11, CC_R22
1017 swi r23, r11, CC_R23
1018 swi r24, r11, CC_R24
1019 swi r25, r11, CC_R25
1020 swi r26, r11, CC_R26
1021 swi r27, r11, CC_R27
1022 swi r28, r11, CC_R28
1023 swi r29, r11, CC_R29
1024 swi r30, r11, CC_R30
1025 /* special purpose registers */
1026 mfs r12, rmsr
1027 nop
1028 swi r12, r11, CC_MSR
1029 mfs r12, rear
1030 nop
1031 swi r12, r11, CC_EAR
1032 mfs r12, resr
1033 nop
1034 swi r12, r11, CC_ESR
1035 mfs r12, rfsr
1036 nop
1037 swi r12, r11, CC_FSR
1038
1039 /* update r31, the current */
1040 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
1041 /* stored it to current_save too */
1042 swi r31, r0, PER_CPU(CURRENT_SAVE)
1043
1044 /* get new process' cpu context and restore */
1045 /* give me start where start context of next task */
1046 addik r11, r6, TI_CPU_CONTEXT
1047
1048 /* non-volatile registers */
1049 lwi r30, r11, CC_R30
1050 lwi r29, r11, CC_R29
1051 lwi r28, r11, CC_R28
1052 lwi r27, r11, CC_R27
1053 lwi r26, r11, CC_R26
1054 lwi r25, r11, CC_R25
1055 lwi r24, r11, CC_R24
1056 lwi r23, r11, CC_R23
1057 lwi r22, r11, CC_R22
1058 lwi r21, r11, CC_R21
1059 lwi r20, r11, CC_R20
1060 lwi r19, r11, CC_R19
1061 /* dedicated registers */
1062 lwi r18, r11, CC_R18
1063 lwi r17, r11, CC_R17
1064 lwi r16, r11, CC_R16
1065 lwi r15, r11, CC_R15
1066 lwi r14, r11, CC_R14
1067 lwi r13, r11, CC_R13
1068 /* skip volatile registers */
1069 lwi r2, r11, CC_R2
1070 lwi r1, r11, CC_R1
1071
1072 /* special purpose registers */
1073 lwi r12, r11, CC_FSR
1074 mts rfsr, r12
1075 nop
1076 lwi r12, r11, CC_MSR
1077 mts rmsr, r12
1078 nop
1079
1080 rtsd r15, 8
1081 nop
1082
1083ENTRY(_reset)
1084 brai 0x70; /* Jump back to FS-boot */
1085
1086ENTRY(_break)
1087 mfs r5, rmsr
1088 nop
1089 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1090 mfs r5, resr
1091 nop
1092 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1093 bri 0
1094
1095 /* These are compiled and loaded into high memory, then
1096 * copied into place in mach_early_setup */
1097 .section .init.ivt, "ax"
1098 .org 0x0
1099 /* this is very important - here is the reset vector */
1100 /* in current MMU branch you don't care what is here - it is
1101 * used from bootloader site - but this is correct for FS-BOOT */
1102 brai 0x70
1103 nop
1104 brai TOPHYS(_user_exception); /* syscall handler */
1105 brai TOPHYS(_interrupt); /* Interrupt handler */
1106 brai TOPHYS(_break); /* nmi trap handler */
1107 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1108
1109 .org 0x60
1110 brai TOPHYS(_debug_exception); /* debug trap handler*/
1111
1112.section .rodata,"a"
1113#include "syscall_table.S"
1114
1115syscall_table_size=(.-sys_call_table)
1116
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 4a8a4064c7ee..0cb64a31e89a 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -21,9 +21,9 @@
21 21
22#include <asm/exceptions.h> 22#include <asm/exceptions.h>
23#include <asm/entry.h> /* For KM CPU var */ 23#include <asm/entry.h> /* For KM CPU var */
24#include <asm/uaccess.h> 24#include <linux/uaccess.h>
25#include <asm/errno.h> 25#include <linux/errno.h>
26#include <asm/ptrace.h> 26#include <linux/ptrace.h>
27#include <asm/current.h> 27#include <asm/current.h>
28 28
29#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 29#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
@@ -31,7 +31,7 @@
31#define MICROBLAZE_DBUS_EXCEPTION 0x04 31#define MICROBLAZE_DBUS_EXCEPTION 0x04
32#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 32#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
33#define MICROBLAZE_FPU_EXCEPTION 0x06 33#define MICROBLAZE_FPU_EXCEPTION 0x06
34#define MICROBLAZE_PRIVILEG_EXCEPTION 0x07 34#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
35 35
36static DEFINE_SPINLOCK(die_lock); 36static DEFINE_SPINLOCK(die_lock);
37 37
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
66asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, 66asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
67 int fsr, int addr) 67 int fsr, int addr)
68{ 68{
69#ifdef CONFIG_MMU
70 int code;
71 addr = regs->pc;
72#endif
73
69#if 0 74#if 0
70 printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", 75 printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
71 type, user_mode(regs) ? "user" : "kernel", fsr, 76 type, user_mode(regs) ? "user" : "kernel", fsr,
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
74 79
75 switch (type & 0x1F) { 80 switch (type & 0x1F) {
76 case MICROBLAZE_ILL_OPCODE_EXCEPTION: 81 case MICROBLAZE_ILL_OPCODE_EXCEPTION:
77 _exception(SIGILL, regs, ILL_ILLOPC, addr); 82 if (user_mode(regs)) {
83 printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
84 _exception(SIGILL, regs, ILL_ILLOPC, addr);
85 return;
86 }
87 printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
88 die("opcode exception", regs, SIGBUS);
78 break; 89 break;
79 case MICROBLAZE_IBUS_EXCEPTION: 90 case MICROBLAZE_IBUS_EXCEPTION:
80 if (user_mode(regs)) { 91 if (user_mode(regs)) {
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
95 die("bus exception", regs, SIGBUS); 106 die("bus exception", regs, SIGBUS);
96 break; 107 break;
97 case MICROBLAZE_DIV_ZERO_EXCEPTION: 108 case MICROBLAZE_DIV_ZERO_EXCEPTION:
98 printk(KERN_WARNING "Divide by zero exception\n"); 109 if (user_mode(regs)) {
99 _exception(SIGILL, regs, ILL_ILLOPC, addr); 110 printk(KERN_WARNING "Divide by zero exception in user mode\n");
111 _exception(SIGILL, regs, ILL_ILLOPC, addr);
112 return;
113 }
114 printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
115 die("Divide by exception", regs, SIGBUS);
100 break; 116 break;
101
102 case MICROBLAZE_FPU_EXCEPTION: 117 case MICROBLAZE_FPU_EXCEPTION:
118 printk(KERN_WARNING "FPU exception\n");
103 /* IEEE FP exception */ 119 /* IEEE FP exception */
104 /* I removed fsr variable and use code var for storing fsr */ 120 /* I removed fsr variable and use code var for storing fsr */
105 if (fsr & FSR_IO) 121 if (fsr & FSR_IO)
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
115 _exception(SIGFPE, regs, fsr, addr); 131 _exception(SIGFPE, regs, fsr, addr);
116 break; 132 break;
117 133
134#ifdef CONFIG_MMU
135 case MICROBLAZE_PRIVILEGED_EXCEPTION:
136 printk(KERN_WARNING "Privileged exception\n");
137 /* "brk r0,r0" - used as debug breakpoint */
138 if (get_user(code, (unsigned long *)regs->pc) == 0
139 && code == 0x980c0000) {
140 _exception(SIGTRAP, regs, TRAP_BRKPT, addr);
141 } else {
142 _exception(SIGILL, regs, ILL_PRVOPC, addr);
143 }
144 break;
145#endif
118 default: 146 default:
147 /* FIXME what to do in unexpected exception */
119 printk(KERN_WARNING "Unexpected exception %02x " 148 printk(KERN_WARNING "Unexpected exception %02x "
120 "PC=%08x in %s mode\n", type, (unsigned int) addr, 149 "PC=%08x in %s mode\n", type, (unsigned int) addr,
121 kernel_mode(regs) ? "kernel" : "user"); 150 kernel_mode(regs) ? "kernel" : "user");
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 319dc35fc922..e568d6ec621b 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -3,6 +3,26 @@
3 * Copyright (C) 2007-2009 PetaLogix 3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 5 *
6 * MMU code derived from arch/ppc/kernel/head_4xx.S:
7 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
8 * Initial PowerPC version.
9 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Rewritten for PReP
11 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
12 * Low-level exception handers, MMU support, and rewrite.
13 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
14 * PowerPC 8xx modifications.
15 * Copyright (c) 1998-1999 TiVo, Inc.
16 * PowerPC 403GCX modifications.
17 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
18 * PowerPC 403GCX/405GP modifications.
19 * Copyright 2000 MontaVista Software Inc.
20 * PPC405 modifications
21 * PowerPC 403GCX/405GP modifications.
22 * Author: MontaVista Software, Inc.
23 * frank_rowand@mvista.com or source@mvista.com
24 * debbie_chu@mvista.com
25 *
6 * This file is subject to the terms and conditions of the GNU General Public 26 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 27 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 28 * for more details.
@@ -12,6 +32,22 @@
12#include <asm/thread_info.h> 32#include <asm/thread_info.h>
13#include <asm/page.h> 33#include <asm/page.h>
14 34
35#ifdef CONFIG_MMU
36#include <asm/setup.h> /* COMMAND_LINE_SIZE */
37#include <asm/mmu.h>
38#include <asm/processor.h>
39
40.data
41.global empty_zero_page
42.align 12
43empty_zero_page:
44 .space 4096
45.global swapper_pg_dir
46swapper_pg_dir:
47 .space 4096
48
49#endif /* CONFIG_MMU */
50
15 .text 51 .text
16ENTRY(_start) 52ENTRY(_start)
17 mfs r1, rmsr 53 mfs r1, rmsr
@@ -32,6 +68,123 @@ _copy_fdt:
32 addik r3, r3, -4 /* descrement loop */ 68 addik r3, r3, -4 /* descrement loop */
33no_fdt_arg: 69no_fdt_arg:
34 70
71#ifdef CONFIG_MMU
72
73#ifndef CONFIG_CMDLINE_BOOL
74/*
75 * handling command line
76 * copy command line to __init_end. There is space for storing command line.
77 */
78 or r6, r0, r0 /* incremment */
79 ori r4, r0, __init_end /* load address of command line */
80 tophys(r4,r4) /* convert to phys address */
81 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
82_copy_command_line:
83 lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
84 sb r7, r4, r6 /* addr[r4+r6]= r7*/
85 addik r6, r6, 1 /* increment counting */
86 bgtid r3, _copy_command_line /* loop for all entries */
87 addik r3, r3, -1 /* descrement loop */
88 addik r5, r4, 0 /* add new space for command line */
89 tovirt(r5,r5)
90#endif /* CONFIG_CMDLINE_BOOL */
91
92#ifdef NOT_COMPILE
93/* save bram context */
94 or r6, r0, r0 /* incremment */
95 ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
96 ori r3, r0, (LMB_SIZE - 4)
97_copy_bram:
98 lw r7, r0, r6 /* r7 = r0 + r6 */
99 sw r7, r4, r6 /* addr[r4 + r6] = r7*/
100 addik r6, r6, 4 /* increment counting */
101 bgtid r3, _copy_bram /* loop for all entries */
102 addik r3, r3, -4 /* descrement loop */
103#endif
104 /* We have to turn on the MMU right away. */
105
106 /*
107 * Set up the initial MMU state so we can do the first level of
108 * kernel initialization. This maps the first 16 MBytes of memory 1:1
109 * virtual to physical.
110 */
111 nop
112 addik r3, r0, 63 /* Invalidate all TLB entries */
113_invalidate:
114 mts rtlbx, r3
115 mts rtlbhi, r0 /* flush: ensure V is clear */
116 bgtid r3, _invalidate /* loop for all entries */
117 addik r3, r3, -1
118 /* sync */
119
120 /*
121 * We should still be executing code at physical address area
122 * RAM_BASEADDR at this point. However, kernel code is at
123 * a virtual address. So, set up a TLB mapping to cover this once
124 * translation is enabled.
125 */
126
127 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
128 tophys(r4,r3) /* Load the kernel physical address */
129
130 mts rpid,r0 /* Load the kernel PID */
131 nop
132 bri 4
133
134 /*
135 * Configure and load two entries into TLB slots 0 and 1.
136 * In case we are pinning TLBs, these are reserved in by the
137 * other TLB functions. If not reserving, then it doesn't
138 * matter where they are loaded.
139 */
140 andi r4,r4,0xfffffc00 /* Mask off the real page number */
141 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
142
143 andi r3,r3,0xfffffc00 /* Mask off the effective page number */
144 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
145
146 mts rtlbx,r0 /* TLB slow 0 */
147
148 mts rtlblo,r4 /* Load the data portion of the entry */
149 mts rtlbhi,r3 /* Load the tag portion of the entry */
150
151 addik r4, r4, 0x01000000 /* Map next 16 M entries */
152 addik r3, r3, 0x01000000
153
154 ori r6,r0,1 /* TLB slot 1 */
155 mts rtlbx,r6
156
157 mts rtlblo,r4 /* Load the data portion of the entry */
158 mts rtlbhi,r3 /* Load the tag portion of the entry */
159
160 /*
161 * Load a TLB entry for LMB, since we need access to
162 * the exception vectors, using a 4k real==virtual mapping.
163 */
164 ori r6,r0,3 /* TLB slot 3 */
165 mts rtlbx,r6
166
167 ori r4,r0,(TLB_WR | TLB_EX)
168 ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
169
170 mts rtlblo,r4 /* Load the data portion of the entry */
171 mts rtlbhi,r3 /* Load the tag portion of the entry */
172
173 /*
174 * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
175 * caches ready to work.
176 */
177turn_on_mmu:
178 ori r15,r0,start_here
179 ori r4,r0,MSR_KERNEL_VMS
180 mts rmsr,r4
181 nop
182 rted r15,0 /* enables MMU */
183 nop
184
185start_here:
186#endif /* CONFIG_MMU */
187
35 /* Initialize small data anchors */ 188 /* Initialize small data anchors */
36 la r13, r0, _KERNEL_SDA_BASE_ 189 la r13, r0, _KERNEL_SDA_BASE_
37 la r2, r0, _KERNEL_SDA2_BASE_ 190 la r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +204,43 @@ no_fdt_arg:
51 brald r15, r8 204 brald r15, r8
52 nop 205 nop
53 206
207#ifndef CONFIG_MMU
54 la r15, r0, machine_halt 208 la r15, r0, machine_halt
55 braid start_kernel 209 braid start_kernel
56 nop 210 nop
211#else
212 /*
213 * Initialize the MMU.
214 */
215 bralid r15, mmu_init
216 nop
217
218 /* Go back to running unmapped so we can load up new values
219 * and change to using our exception vectors.
220 * On the MicroBlaze, all we invalidate the used TLB entries to clear
221 * the old 16M byte TLB mappings.
222 */
223 ori r15,r0,TOPHYS(kernel_load_context)
224 ori r4,r0,MSR_KERNEL
225 mts rmsr,r4
226 nop
227 bri 4
228 rted r15,0
229 nop
230
231 /* Load up the kernel context */
232kernel_load_context:
233 # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
234 ori r5,r0,3
235 mts rtlbx,r5
236 nop
237 mts rtlbhi,r0
238 nop
239 addi r15, r0, machine_halt
240 ori r17, r0, start_kernel
241 ori r4, r0, MSR_KERNEL_VMS
242 mts rmsr, r4
243 nop
244 rted r17, 0 /* enable MMU and jump to start_kernel */
245 nop
246#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index cf9486d99838..9d591cd74fc2 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -53,6 +53,12 @@
53 * - Illegal instruction opcode 53 * - Illegal instruction opcode
54 * - Divide-by-zero 54 * - Divide-by-zero
55 * 55 *
56 * - Privileged instruction exception (MMU)
57 * - Data storage exception (MMU)
58 * - Instruction storage exception (MMU)
59 * - Data TLB miss exception (MMU)
60 * - Instruction TLB miss exception (MMU)
61 *
56 * Note we disable interrupts during exception handling, otherwise we will 62 * Note we disable interrupts during exception handling, otherwise we will
57 * possibly get multiple re-entrancy if interrupt handles themselves cause 63 * possibly get multiple re-entrancy if interrupt handles themselves cause
58 * exceptions. JW 64 * exceptions. JW
@@ -71,9 +77,24 @@
71#include <asm/asm-offsets.h> 77#include <asm/asm-offsets.h>
72 78
73/* Helpful Macros */ 79/* Helpful Macros */
80#ifndef CONFIG_MMU
74#define EX_HANDLER_STACK_SIZ (4*19) 81#define EX_HANDLER_STACK_SIZ (4*19)
82#endif
75#define NUM_TO_REG(num) r ## num 83#define NUM_TO_REG(num) r ## num
76 84
85#ifdef CONFIG_MMU
86/* FIXME you can't change first load of MSR because there is
87 * hardcoded jump bri 4 */
88 #define RESTORE_STATE \
89 lwi r3, r1, PT_R3; \
90 lwi r4, r1, PT_R4; \
91 lwi r5, r1, PT_R5; \
92 lwi r6, r1, PT_R6; \
93 lwi r11, r1, PT_R11; \
94 lwi r31, r1, PT_R31; \
95 lwi r1, r0, TOPHYS(r0_ram + 0);
96#endif /* CONFIG_MMU */
97
77#define LWREG_NOP \ 98#define LWREG_NOP \
78 bri ex_handler_unhandled; \ 99 bri ex_handler_unhandled; \
79 nop; 100 nop;
@@ -106,6 +127,54 @@
106 or r3, r0, NUM_TO_REG (regnum); \ 127 or r3, r0, NUM_TO_REG (regnum); \
107 bri ex_sw_tail; 128 bri ex_sw_tail;
108 129
130#ifdef CONFIG_MMU
131 #define R3_TO_LWREG_VM_V(regnum) \
132 brid ex_lw_end_vm; \
133 swi r3, r7, 4 * regnum;
134
135 #define R3_TO_LWREG_VM(regnum) \
136 brid ex_lw_end_vm; \
137 or NUM_TO_REG (regnum), r0, r3;
138
139 #define SWREG_TO_R3_VM_V(regnum) \
140 brid ex_sw_tail_vm; \
141 lwi r3, r7, 4 * regnum;
142
143 #define SWREG_TO_R3_VM(regnum) \
144 brid ex_sw_tail_vm; \
145 or r3, r0, NUM_TO_REG (regnum);
146
147 /* Shift right instruction depending on available configuration */
148 #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
149 #define BSRLI(rD, rA, imm) \
150 bsrli rD, rA, imm
151 #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
152 #define BSRLI(rD, rA, imm) \
153 ori rD, r0, (1 << imm); \
154 idivu rD, rD, rA
155 #else
156 #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
157 /* Only the used shift constants defined here - add more if needed */
158 #define BSRLI2(rD, rA) \
159 srl rD, rA; /* << 1 */ \
160 srl rD, rD; /* << 2 */
161 #define BSRLI10(rD, rA) \
162 srl rD, rA; /* << 1 */ \
163 srl rD, rD; /* << 2 */ \
164 srl rD, rD; /* << 3 */ \
165 srl rD, rD; /* << 4 */ \
166 srl rD, rD; /* << 5 */ \
167 srl rD, rD; /* << 6 */ \
168 srl rD, rD; /* << 7 */ \
169 srl rD, rD; /* << 8 */ \
170 srl rD, rD; /* << 9 */ \
171 srl rD, rD /* << 10 */
172 #define BSRLI20(rD, rA) \
173 BSRLI10(rD, rA); \
174 BSRLI10(rD, rD)
175 #endif
176#endif /* CONFIG_MMU */
177
109.extern other_exception_handler /* Defined in exception.c */ 178.extern other_exception_handler /* Defined in exception.c */
110 179
111/* 180/*
@@ -163,34 +232,119 @@
163 232
164/* wrappers to restore state before coming to entry.S */ 233/* wrappers to restore state before coming to entry.S */
165 234
235#ifdef CONFIG_MMU
236.section .rodata
237.align 4
238_MB_HW_ExceptionVectorTable:
239/* 0 - Undefined */
240 .long TOPHYS(ex_handler_unhandled)
241/* 1 - Unaligned data access exception */
242 .long TOPHYS(handle_unaligned_ex)
243/* 2 - Illegal op-code exception */
244 .long TOPHYS(full_exception_trapw)
245/* 3 - Instruction bus error exception */
246 .long TOPHYS(full_exception_trapw)
247/* 4 - Data bus error exception */
248 .long TOPHYS(full_exception_trapw)
249/* 5 - Divide by zero exception */
250 .long TOPHYS(full_exception_trapw)
251/* 6 - Floating point unit exception */
252 .long TOPHYS(full_exception_trapw)
253/* 7 - Privileged instruction exception */
254 .long TOPHYS(full_exception_trapw)
255/* 8 - 15 - Undefined */
256 .long TOPHYS(ex_handler_unhandled)
257 .long TOPHYS(ex_handler_unhandled)
258 .long TOPHYS(ex_handler_unhandled)
259 .long TOPHYS(ex_handler_unhandled)
260 .long TOPHYS(ex_handler_unhandled)
261 .long TOPHYS(ex_handler_unhandled)
262 .long TOPHYS(ex_handler_unhandled)
263 .long TOPHYS(ex_handler_unhandled)
264/* 16 - Data storage exception */
265 .long TOPHYS(handle_data_storage_exception)
266/* 17 - Instruction storage exception */
267 .long TOPHYS(handle_instruction_storage_exception)
268/* 18 - Data TLB miss exception */
269 .long TOPHYS(handle_data_tlb_miss_exception)
270/* 19 - Instruction TLB miss exception */
271 .long TOPHYS(handle_instruction_tlb_miss_exception)
272/* 20 - 31 - Undefined */
273 .long TOPHYS(ex_handler_unhandled)
274 .long TOPHYS(ex_handler_unhandled)
275 .long TOPHYS(ex_handler_unhandled)
276 .long TOPHYS(ex_handler_unhandled)
277 .long TOPHYS(ex_handler_unhandled)
278 .long TOPHYS(ex_handler_unhandled)
279 .long TOPHYS(ex_handler_unhandled)
280 .long TOPHYS(ex_handler_unhandled)
281 .long TOPHYS(ex_handler_unhandled)
282 .long TOPHYS(ex_handler_unhandled)
283 .long TOPHYS(ex_handler_unhandled)
284 .long TOPHYS(ex_handler_unhandled)
285#endif
286
166.global _hw_exception_handler 287.global _hw_exception_handler
167.section .text 288.section .text
168.align 4 289.align 4
169.ent _hw_exception_handler 290.ent _hw_exception_handler
170_hw_exception_handler: 291_hw_exception_handler:
292#ifndef CONFIG_MMU
171 addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ 293 addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
294#else
295 swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
296 /* Save date to kernel memory. Here is the problem
297 * when you came from user space */
298 ori r1, r0, TOPHYS(r0_ram + 28);
299#endif
172 swi r3, r1, PT_R3 300 swi r3, r1, PT_R3
173 swi r4, r1, PT_R4 301 swi r4, r1, PT_R4
174 swi r5, r1, PT_R5 302 swi r5, r1, PT_R5
175 swi r6, r1, PT_R6 303 swi r6, r1, PT_R6
176 304
177 mfs r5, rmsr; 305#ifdef CONFIG_MMU
178 nop 306 swi r11, r1, PT_R11
179 swi r5, r1, 0; 307 swi r31, r1, PT_R31
180 mfs r4, rbtr /* Save BTR before jumping to handler */ 308 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
181 nop 309#endif
310
182 mfs r3, resr 311 mfs r3, resr
183 nop 312 nop
313 mfs r4, rear;
314 nop
184 315
316#ifndef CONFIG_MMU
185 andi r5, r3, 0x1000; /* Check ESR[DS] */ 317 andi r5, r3, 0x1000; /* Check ESR[DS] */
186 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 318 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
187 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 319 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
188 nop 320 nop
189not_in_delay_slot: 321not_in_delay_slot:
190 swi r17, r1, PT_R17 322 swi r17, r1, PT_R17
323#endif
191 324
192 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 325 andi r5, r3, 0x1F; /* Extract ESR[EXC] */
193 326
327#ifdef CONFIG_MMU
328 /* Calculate exception vector offset = r5 << 2 */
329 addk r6, r5, r5; /* << 1 */
330 addk r6, r6, r6; /* << 2 */
331
332/* counting which exception happen */
333 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
334 addi r5, r5, 1
335 swi r5, r0, 0x200 + TOPHYS(r0_ram)
336 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
337 addi r5, r5, 1
338 swi r5, r6, 0x200 + TOPHYS(r0_ram)
339/* end */
340 /* Load the HW Exception vector */
341 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
342 bra r6
343
344full_exception_trapw:
345 RESTORE_STATE
346 bri full_exception_trap
347#else
194 /* Exceptions enabled here. This will allow nested exceptions */ 348 /* Exceptions enabled here. This will allow nested exceptions */
195 mfs r6, rmsr; 349 mfs r6, rmsr;
196 nop 350 nop
@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */
254 lwi r18, r1, PT_R18 408 lwi r18, r1, PT_R18
255 409
256 bri ex_handler_done; /* Complete exception handling */ 410 bri ex_handler_done; /* Complete exception handling */
411#endif
257 412
258/* 0x01 - Unaligned data access exception 413/* 0x01 - Unaligned data access exception
259 * This occurs when a word access is not aligned on a word boundary, 414 * This occurs when a word access is not aligned on a word boundary,
@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */
265handle_unaligned_ex: 420handle_unaligned_ex:
266 /* Working registers already saved: R3, R4, R5, R6 421 /* Working registers already saved: R3, R4, R5, R6
267 * R3 = ESR 422 * R3 = ESR
268 * R4 = BTR 423 * R4 = EAR
269 */ 424 */
270 mfs r4, rear; 425#ifdef CONFIG_MMU
426 andi r6, r3, 0x1000 /* Check ESR[DS] */
427 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
428 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
271 nop 429 nop
430_no_delayslot:
431#endif
432
433#ifdef CONFIG_MMU
434 /* Check if unaligned address is last on a 4k page */
435 andi r5, r4, 0xffc
436 xori r5, r5, 0xffc
437 bnei r5, _unaligned_ex2
438 _unaligned_ex1:
439 RESTORE_STATE;
440/* Another page must be accessed or physical address not in page table */
441 bri unaligned_data_trap
272 442
443 _unaligned_ex2:
444#endif
273 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 445 andi r6, r3, 0x3E0; /* Mask and extract the register operand */
274 srl r6, r6; /* r6 >> 5 */ 446 srl r6, r6; /* r6 >> 5 */
275 srl r6, r6; 447 srl r6, r6;
@@ -278,6 +450,45 @@ handle_unaligned_ex:
278 srl r6, r6; 450 srl r6, r6;
279 /* Store the register operand in a temporary location */ 451 /* Store the register operand in a temporary location */
280 sbi r6, r0, TOPHYS(ex_reg_op); 452 sbi r6, r0, TOPHYS(ex_reg_op);
453#ifdef CONFIG_MMU
454 /* Get physical address */
455 /* If we are faulting a kernel address, we have to use the
456 * kernel page tables.
457 */
458 ori r5, r0, CONFIG_KERNEL_START
459 cmpu r5, r4, r5
460 bgti r5, _unaligned_ex3
461 ori r5, r0, swapper_pg_dir
462 bri _unaligned_ex4
463
464 /* Get the PGD for the current thread. */
465_unaligned_ex3: /* user thread */
466 addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */
467 lwi r5, r5, TASK_THREAD + PGDIR
468_unaligned_ex4:
469 tophys(r5,r5)
470 BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
471 andi r6, r6, 0xffc
472/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
473 or r5, r5, r6
474 lwi r6, r5, 0 /* Get L1 entry */
475 andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
476 beqi r5, _unaligned_ex1 /* Bail if no table */
477
478 tophys(r5,r5)
479 BSRLI(r6,r4,10) /* Compute PTE address */
480 andi r6, r6, 0xffc
481 andi r5, r5, 0xfffff003
482 or r5, r5, r6
483 lwi r5, r5, 0 /* Get Linux PTE */
484
485 andi r6, r5, _PAGE_PRESENT
486 beqi r6, _unaligned_ex1 /* Bail if no page */
487
488 andi r5, r5, 0xfffff000 /* Extract RPN */
489 andi r4, r4, 0x00000fff /* Extract offset */
490 or r4, r4, r5 /* Create physical address */
491#endif /* CONFIG_MMU */
281 492
282 andi r6, r3, 0x400; /* Extract ESR[S] */ 493 andi r6, r3, 0x400; /* Extract ESR[S] */
283 bnei r6, ex_sw; 494 bnei r6, ex_sw;
@@ -355,6 +566,7 @@ ex_shw:
355ex_sw_end: /* Exception handling of store word, ends. */ 566ex_sw_end: /* Exception handling of store word, ends. */
356 567
357ex_handler_done: 568ex_handler_done:
569#ifndef CONFIG_MMU
358 lwi r5, r1, 0 /* RMSR */ 570 lwi r5, r1, 0 /* RMSR */
359 mts rmsr, r5 571 mts rmsr, r5
360 nop 572 nop
@@ -366,13 +578,455 @@ ex_handler_done:
366 578
367 rted r17, 0 579 rted r17, 0
368 addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ 580 addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
581#else
582 RESTORE_STATE;
583 rted r17, 0
584 nop
585#endif
586
587#ifdef CONFIG_MMU
588 /* Exception vector entry code. This code runs with address translation
589 * turned off (i.e. using physical addresses). */
590
591 /* Exception vectors. */
592
593 /* 0x10 - Data Storage Exception
594 * This happens for just a few reasons. U0 set (but we don't do that),
595 * or zone protection fault (user violation, write to protected page).
596 * If this is just an update of modified status, we do that quickly
597 * and exit. Otherwise, we call heavyweight functions to do the work.
598 */
599 handle_data_storage_exception:
600 /* Working registers already saved: R3, R4, R5, R6
601 * R3 = ESR
602 */
603 mfs r11, rpid
604 nop
605 bri 4
606 mfs r3, rear /* Get faulting address */
607 nop
608 /* If we are faulting a kernel address, we have to use the
609 * kernel page tables.
610 */
611 ori r4, r0, CONFIG_KERNEL_START
612 cmpu r4, r3, r4
613 bgti r4, ex3
614 /* First, check if it was a zone fault (which means a user
615 * tried to access a kernel or read-protected page - always
616 * a SEGV). All other faults here must be stores, so no
617 * need to check ESR_S as well. */
618 mfs r4, resr
619 nop
620 andi r4, r4, 0x800 /* ESR_Z - zone protection */
621 bnei r4, ex2
622
623 ori r4, r0, swapper_pg_dir
624 mts rpid, r0 /* TLB will have 0 TID */
625 nop
626 bri ex4
627
628 /* Get the PGD for the current thread. */
629 ex3:
630 /* First, check if it was a zone fault (which means a user
631 * tried to access a kernel or read-protected page - always
632 * a SEGV). All other faults here must be stores, so no
633 * need to check ESR_S as well. */
634 mfs r4, resr
635 nop
636 andi r4, r4, 0x800 /* ESR_Z */
637 bnei r4, ex2
638 /* get current task address */
639 addi r4 ,CURRENT_TASK, TOPHYS(0);
640 lwi r4, r4, TASK_THREAD+PGDIR
641 ex4:
642 tophys(r4,r4)
643 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
644 andi r5, r5, 0xffc
645/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
646 or r4, r4, r5
647 lwi r4, r4, 0 /* Get L1 entry */
648 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
649 beqi r5, ex2 /* Bail if no table */
650
651 tophys(r5,r5)
652 BSRLI(r6,r3,10) /* Compute PTE address */
653 andi r6, r6, 0xffc
654 andi r5, r5, 0xfffff003
655 or r5, r5, r6
656 lwi r4, r5, 0 /* Get Linux PTE */
657
658 andi r6, r4, _PAGE_RW /* Is it writeable? */
659 beqi r6, ex2 /* Bail if not */
660
661 /* Update 'changed' */
662 ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
663 swi r4, r5, 0 /* Update Linux page table */
664
665 /* Most of the Linux PTE is ready to load into the TLB LO.
666 * We set ZSEL, where only the LS-bit determines user access.
667 * We set execute, because we don't have the granularity to
668 * properly set this at the page level (Linux problem).
669 * If shared is set, we cause a zero PID->TID load.
670 * Many of these bits are software only. Bits we don't set
671 * here we (properly should) assume have the appropriate value.
672 */
673 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
674 ori r4, r4, _PAGE_HWEXEC /* make it executable */
675
676 /* find the TLB index that caused the fault. It has to be here*/
677 mts rtlbsx, r3
678 nop
679 mfs r5, rtlbx /* DEBUG: TBD */
680 nop
681 mts rtlblo, r4 /* Load TLB LO */
682 nop
683 /* Will sync shadow TLBs */
684
685 /* Done...restore registers and get out of here. */
686 mts rpid, r11
687 nop
688 bri 4
689
690 RESTORE_STATE;
691 rted r17, 0
692 nop
693 ex2:
694 /* The bailout. Restore registers to pre-exception conditions
695 * and call the heavyweights to help us out. */
696 mts rpid, r11
697 nop
698 bri 4
699 RESTORE_STATE;
700 bri page_fault_data_trap
701
702
703 /* 0x11 - Instruction Storage Exception
704 * This is caused by a fetch from non-execute or guarded pages. */
705 handle_instruction_storage_exception:
706 /* Working registers already saved: R3, R4, R5, R6
707 * R3 = ESR
708 */
709
710 mfs r3, rear /* Get faulting address */
711 nop
712 RESTORE_STATE;
713 bri page_fault_instr_trap
714
715 /* 0x12 - Data TLB Miss Exception
716 * As the name implies, translation is not in the MMU, so search the
717 * page tables and fix it. The only purpose of this function is to
718 * load TLB entries from the page table if they exist.
719 */
720 handle_data_tlb_miss_exception:
721 /* Working registers already saved: R3, R4, R5, R6
722 * R3 = ESR
723 */
724 mfs r11, rpid
725 nop
726 bri 4
727 mfs r3, rear /* Get faulting address */
728 nop
729
730 /* If we are faulting a kernel address, we have to use the
731 * kernel page tables. */
732 ori r4, r0, CONFIG_KERNEL_START
733 cmpu r4, r3, r4
734 bgti r4, ex5
735 ori r4, r0, swapper_pg_dir
736 mts rpid, r0 /* TLB will have 0 TID */
737 nop
738 bri ex6
369 739
740 /* Get the PGD for the current thread. */
741 ex5:
742 /* get current task address */
743 addi r4 ,CURRENT_TASK, TOPHYS(0);
744 lwi r4, r4, TASK_THREAD+PGDIR
745 ex6:
746 tophys(r4,r4)
747 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
748 andi r5, r5, 0xffc
749/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
750 or r4, r4, r5
751 lwi r4, r4, 0 /* Get L1 entry */
752 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
753 beqi r5, ex7 /* Bail if no table */
754
755 tophys(r5,r5)
756 BSRLI(r6,r3,10) /* Compute PTE address */
757 andi r6, r6, 0xffc
758 andi r5, r5, 0xfffff003
759 or r5, r5, r6
760 lwi r4, r5, 0 /* Get Linux PTE */
761
762 andi r6, r4, _PAGE_PRESENT
763 beqi r6, ex7
764
765 ori r4, r4, _PAGE_ACCESSED
766 swi r4, r5, 0
767
768 /* Most of the Linux PTE is ready to load into the TLB LO.
769 * We set ZSEL, where only the LS-bit determines user access.
770 * We set execute, because we don't have the granularity to
771 * properly set this at the page level (Linux problem).
772 * If shared is set, we cause a zero PID->TID load.
773 * Many of these bits are software only. Bits we don't set
774 * here we (properly should) assume have the appropriate value.
775 */
776 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
777
778 bri finish_tlb_load
779 ex7:
780 /* The bailout. Restore registers to pre-exception conditions
781 * and call the heavyweights to help us out.
782 */
783 mts rpid, r11
784 nop
785 bri 4
786 RESTORE_STATE;
787 bri page_fault_data_trap
788
789 /* 0x13 - Instruction TLB Miss Exception
790 * Nearly the same as above, except we get our information from
791 * different registers and bailout to a different point.
792 */
793 handle_instruction_tlb_miss_exception:
794 /* Working registers already saved: R3, R4, R5, R6
795 * R3 = ESR
796 */
797 mfs r11, rpid
798 nop
799 bri 4
800 mfs r3, rear /* Get faulting address */
801 nop
802
803 /* If we are faulting a kernel address, we have to use the
804 * kernel page tables.
805 */
806 ori r4, r0, CONFIG_KERNEL_START
807 cmpu r4, r3, r4
808 bgti r4, ex8
809 ori r4, r0, swapper_pg_dir
810 mts rpid, r0 /* TLB will have 0 TID */
811 nop
812 bri ex9
813
814 /* Get the PGD for the current thread. */
815 ex8:
816 /* get current task address */
817 addi r4 ,CURRENT_TASK, TOPHYS(0);
818 lwi r4, r4, TASK_THREAD+PGDIR
819 ex9:
820 tophys(r4,r4)
821 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
822 andi r5, r5, 0xffc
823/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
824 or r4, r4, r5
825 lwi r4, r4, 0 /* Get L1 entry */
826 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
827 beqi r5, ex10 /* Bail if no table */
828
829 tophys(r5,r5)
830 BSRLI(r6,r3,10) /* Compute PTE address */
831 andi r6, r6, 0xffc
832 andi r5, r5, 0xfffff003
833 or r5, r5, r6
834 lwi r4, r5, 0 /* Get Linux PTE */
835
836 andi r6, r4, _PAGE_PRESENT
 837 beqi r6, ex10
838
839 ori r4, r4, _PAGE_ACCESSED
840 swi r4, r5, 0
841
842 /* Most of the Linux PTE is ready to load into the TLB LO.
843 * We set ZSEL, where only the LS-bit determines user access.
844 * We set execute, because we don't have the granularity to
845 * properly set this at the page level (Linux problem).
846 * If shared is set, we cause a zero PID->TID load.
847 * Many of these bits are software only. Bits we don't set
848 * here we (properly should) assume have the appropriate value.
849 */
850 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
851
852 bri finish_tlb_load
853 ex10:
854 /* The bailout. Restore registers to pre-exception conditions
855 * and call the heavyweights to help us out.
856 */
857 mts rpid, r11
858 nop
859 bri 4
860 RESTORE_STATE;
861 bri page_fault_instr_trap
862
863/* Both the instruction and data TLB miss get to this point to load the TLB.
864 * r3 - EA of fault
865 * r4 - TLB LO (info from Linux PTE)
866 * r5, r6 - available to use
867 * PID - loaded with proper value when we get here
868 * Upon exit, we reload everything and RFI.
869 * A common place to load the TLB.
870 */
871 tlb_index:
872 .long 1 /* MS: storing last used tlb index */
873 finish_tlb_load:
874 /* MS: load the last used TLB index. */
875 lwi r5, r0, TOPHYS(tlb_index)
876 addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
877
878/* MS: FIXME this is potential fault, because this is mask not count */
879 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
880 ori r6, r0, 1
881 cmp r31, r5, r6
882 blti r31, sem
883 addik r5, r6, 1
884 sem:
885 /* MS: save back current TLB index */
886 swi r5, r0, TOPHYS(tlb_index)
887
888 ori r4, r4, _PAGE_HWEXEC /* make it executable */
889 mts rtlbx, r5 /* MS: save current TLB */
890 nop
891 mts rtlblo, r4 /* MS: save to TLB LO */
892 nop
893
894 /* Create EPN. This is the faulting address plus a static
895 * set of bits. These are size, valid, E, U0, and ensure
896 * bits 20 and 21 are zero.
897 */
898 andi r3, r3, 0xfffff000
899 ori r3, r3, 0x0c0
900 mts rtlbhi, r3 /* Load TLB HI */
901 nop
902
903 /* Done...restore registers and get out of here. */
904 ex12:
905 mts rpid, r11
906 nop
907 bri 4
908 RESTORE_STATE;
909 rted r17, 0
910 nop
911
912 /* extern void giveup_fpu(struct task_struct *prev)
913 *
914 * The MicroBlaze processor may have an FPU, so this should not just
915 * return: TBD.
916 */
917 .globl giveup_fpu;
918 .align 4;
919 giveup_fpu:
920 bralid r15,0 /* TBD */
921 nop
922
923 /* At present, this routine just hangs. - extern void abort(void) */
924 .globl abort;
925 .align 4;
926 abort:
927 br r0
928
929 .globl set_context;
930 .align 4;
931 set_context:
932 mts rpid, r5 /* Shadow TLBs are automatically */
933 nop
934 bri 4 /* flushed by changing PID */
935 rtsd r15,8
936 nop
937
938#endif
370.end _hw_exception_handler 939.end _hw_exception_handler
371 940
941#ifdef CONFIG_MMU
942/* Unaligned data access exception last on a 4k page for MMU.
943 * When this is called, we are in virtual mode with exceptions enabled
944 * and registers 1-13,15,17,18 saved.
945 *
946 * R3 = ESR
947 * R4 = EAR
948 * R7 = pointer to saved registers (struct pt_regs *regs)
949 *
950 * This handler perform the access, and returns via ret_from_exc.
951 */
952.global _unaligned_data_exception
953.ent _unaligned_data_exception
954_unaligned_data_exception:
955 andi r8, r3, 0x3E0; /* Mask and extract the register operand */
956 BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */
957 andi r6, r3, 0x400; /* Extract ESR[S] */
958 bneid r6, ex_sw_vm;
959 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
960ex_lw_vm:
961 beqid r6, ex_lhw_vm;
962 lbui r5, r4, 0; /* Exception address in r4 - delay slot */
963/* Load a word, byte-by-byte from destination address and save it in tmp space*/
964 la r6, r0, ex_tmp_data_loc_0;
965 sbi r5, r6, 0;
966 lbui r5, r4, 1;
967 sbi r5, r6, 1;
968 lbui r5, r4, 2;
969 sbi r5, r6, 2;
970 lbui r5, r4, 3;
971 sbi r5, r6, 3;
972 brid ex_lw_tail_vm;
973/* Get the destination register value into r3 - delay slot */
974 lwi r3, r6, 0;
975ex_lhw_vm:
976 /* Load a half-word, byte-by-byte from destination address and
977 * save it in tmp space */
978 la r6, r0, ex_tmp_data_loc_0;
979 sbi r5, r6, 0;
980 lbui r5, r4, 1;
981 sbi r5, r6, 1;
982 lhui r3, r6, 0; /* Get the destination register value into r3 */
983ex_lw_tail_vm:
984 /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
985 addik r5, r8, lw_table_vm;
986 bra r5;
987ex_lw_end_vm: /* Exception handling of load word, ends */
988 brai ret_from_exc;
989ex_sw_vm:
990/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
991 addik r5, r8, sw_table_vm;
992 bra r5;
993ex_sw_tail_vm:
994 la r5, r0, ex_tmp_data_loc_0;
995 beqid r6, ex_shw_vm;
996 swi r3, r5, 0; /* Get the word - delay slot */
997 /* Store the word, byte-by-byte into destination address */
998 lbui r3, r5, 0;
999 sbi r3, r4, 0;
1000 lbui r3, r5, 1;
1001 sbi r3, r4, 1;
1002 lbui r3, r5, 2;
1003 sbi r3, r4, 2;
1004 lbui r3, r5, 3;
1005 brid ret_from_exc;
1006 sbi r3, r4, 3; /* Delay slot */
1007ex_shw_vm:
1008 /* Store the lower half-word, byte-by-byte into destination address */
1009 lbui r3, r5, 2;
1010 sbi r3, r4, 0;
1011 lbui r3, r5, 3;
1012 brid ret_from_exc;
1013 sbi r3, r4, 1; /* Delay slot */
1014ex_sw_end_vm: /* Exception handling of store word, ends. */
1015.end _unaligned_data_exception
1016#endif /* CONFIG_MMU */
1017
372ex_handler_unhandled: 1018ex_handler_unhandled:
373/* FIXME add handle function for unhandled exception - dump register */ 1019/* FIXME add handle function for unhandled exception - dump register */
374 bri 0 1020 bri 0
375 1021
1022/*
1023 * hw_exception_handler Jump Table
1024 * - Contains code snippets for each register that caused the unalign exception
1025 * - Hence exception handler is NOT self-modifying
1026 * - Separate table for load exceptions and store exceptions.
1027 * - Each table is of size: (8 * 32) = 256 bytes
1028 */
1029
376.section .text 1030.section .text
377.align 4 1031.align 4
378lw_table: 1032lw_table:
@@ -407,7 +1061,11 @@ lw_r27: R3_TO_LWREG (27);
407lw_r28: R3_TO_LWREG (28); 1061lw_r28: R3_TO_LWREG (28);
408lw_r29: R3_TO_LWREG (29); 1062lw_r29: R3_TO_LWREG (29);
409lw_r30: R3_TO_LWREG (30); 1063lw_r30: R3_TO_LWREG (30);
1064#ifdef CONFIG_MMU
1065lw_r31: R3_TO_LWREG_V (31);
1066#else
410lw_r31: R3_TO_LWREG (31); 1067lw_r31: R3_TO_LWREG (31);
1068#endif
411 1069
412sw_table: 1070sw_table:
413sw_r0: SWREG_TO_R3 (0); 1071sw_r0: SWREG_TO_R3 (0);
@@ -441,7 +1099,81 @@ sw_r27: SWREG_TO_R3 (27);
441sw_r28: SWREG_TO_R3 (28); 1099sw_r28: SWREG_TO_R3 (28);
442sw_r29: SWREG_TO_R3 (29); 1100sw_r29: SWREG_TO_R3 (29);
443sw_r30: SWREG_TO_R3 (30); 1101sw_r30: SWREG_TO_R3 (30);
1102#ifdef CONFIG_MMU
1103sw_r31: SWREG_TO_R3_V (31);
1104#else
444sw_r31: SWREG_TO_R3 (31); 1105sw_r31: SWREG_TO_R3 (31);
1106#endif
1107
1108#ifdef CONFIG_MMU
1109lw_table_vm:
1110lw_r0_vm: R3_TO_LWREG_VM (0);
1111lw_r1_vm: R3_TO_LWREG_VM_V (1);
1112lw_r2_vm: R3_TO_LWREG_VM_V (2);
1113lw_r3_vm: R3_TO_LWREG_VM_V (3);
1114lw_r4_vm: R3_TO_LWREG_VM_V (4);
1115lw_r5_vm: R3_TO_LWREG_VM_V (5);
1116lw_r6_vm: R3_TO_LWREG_VM_V (6);
1117lw_r7_vm: R3_TO_LWREG_VM_V (7);
1118lw_r8_vm: R3_TO_LWREG_VM_V (8);
1119lw_r9_vm: R3_TO_LWREG_VM_V (9);
1120lw_r10_vm: R3_TO_LWREG_VM_V (10);
1121lw_r11_vm: R3_TO_LWREG_VM_V (11);
1122lw_r12_vm: R3_TO_LWREG_VM_V (12);
1123lw_r13_vm: R3_TO_LWREG_VM_V (13);
1124lw_r14_vm: R3_TO_LWREG_VM (14);
1125lw_r15_vm: R3_TO_LWREG_VM_V (15);
1126lw_r16_vm: R3_TO_LWREG_VM (16);
1127lw_r17_vm: R3_TO_LWREG_VM_V (17);
1128lw_r18_vm: R3_TO_LWREG_VM_V (18);
1129lw_r19_vm: R3_TO_LWREG_VM (19);
1130lw_r20_vm: R3_TO_LWREG_VM (20);
1131lw_r21_vm: R3_TO_LWREG_VM (21);
1132lw_r22_vm: R3_TO_LWREG_VM (22);
1133lw_r23_vm: R3_TO_LWREG_VM (23);
1134lw_r24_vm: R3_TO_LWREG_VM (24);
1135lw_r25_vm: R3_TO_LWREG_VM (25);
1136lw_r26_vm: R3_TO_LWREG_VM (26);
1137lw_r27_vm: R3_TO_LWREG_VM (27);
1138lw_r28_vm: R3_TO_LWREG_VM (28);
1139lw_r29_vm: R3_TO_LWREG_VM (29);
1140lw_r30_vm: R3_TO_LWREG_VM (30);
1141lw_r31_vm: R3_TO_LWREG_VM_V (31);
1142
1143sw_table_vm:
1144sw_r0_vm: SWREG_TO_R3_VM (0);
1145sw_r1_vm: SWREG_TO_R3_VM_V (1);
1146sw_r2_vm: SWREG_TO_R3_VM_V (2);
1147sw_r3_vm: SWREG_TO_R3_VM_V (3);
1148sw_r4_vm: SWREG_TO_R3_VM_V (4);
1149sw_r5_vm: SWREG_TO_R3_VM_V (5);
1150sw_r6_vm: SWREG_TO_R3_VM_V (6);
1151sw_r7_vm: SWREG_TO_R3_VM_V (7);
1152sw_r8_vm: SWREG_TO_R3_VM_V (8);
1153sw_r9_vm: SWREG_TO_R3_VM_V (9);
1154sw_r10_vm: SWREG_TO_R3_VM_V (10);
1155sw_r11_vm: SWREG_TO_R3_VM_V (11);
1156sw_r12_vm: SWREG_TO_R3_VM_V (12);
1157sw_r13_vm: SWREG_TO_R3_VM_V (13);
1158sw_r14_vm: SWREG_TO_R3_VM (14);
1159sw_r15_vm: SWREG_TO_R3_VM_V (15);
1160sw_r16_vm: SWREG_TO_R3_VM (16);
1161sw_r17_vm: SWREG_TO_R3_VM_V (17);
1162sw_r18_vm: SWREG_TO_R3_VM_V (18);
1163sw_r19_vm: SWREG_TO_R3_VM (19);
1164sw_r20_vm: SWREG_TO_R3_VM (20);
1165sw_r21_vm: SWREG_TO_R3_VM (21);
1166sw_r22_vm: SWREG_TO_R3_VM (22);
1167sw_r23_vm: SWREG_TO_R3_VM (23);
1168sw_r24_vm: SWREG_TO_R3_VM (24);
1169sw_r25_vm: SWREG_TO_R3_VM (25);
1170sw_r26_vm: SWREG_TO_R3_VM (26);
1171sw_r27_vm: SWREG_TO_R3_VM (27);
1172sw_r28_vm: SWREG_TO_R3_VM (28);
1173sw_r29_vm: SWREG_TO_R3_VM (29);
1174sw_r30_vm: SWREG_TO_R3_VM (30);
1175sw_r31_vm: SWREG_TO_R3_VM_V (31);
1176#endif /* CONFIG_MMU */
445 1177
446/* Temporary data structures used in the handler */ 1178/* Temporary data structures used in the handler */
447.section .data 1179.section .data
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 5f71790e3c3c..59ff20e33e0c 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -45,3 +45,5 @@ extern void __udivsi3(void);
45EXPORT_SYMBOL(__udivsi3); 45EXPORT_SYMBOL(__udivsi3);
46extern void __umodsi3(void); 46extern void __umodsi3(void);
47EXPORT_SYMBOL(__umodsi3); 47EXPORT_SYMBOL(__umodsi3);
48extern char *_ebss;
49EXPORT_SYMBOL_GPL(_ebss);
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
new file mode 100644
index 000000000000..df16c6287a8e
--- /dev/null
+++ b/arch/microblaze/kernel/misc.S
@@ -0,0 +1,120 @@
1/*
2 * Miscellaneous low-level MMU functions.
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
7 *
8 * Derived from arch/ppc/kernel/misc.S
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 */
14
15#include <linux/linkage.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <linux/errno.h>
19#include <asm/mmu.h>
20#include <asm/page.h>
21
22 .text
23/*
24 * Flush MMU TLB
25 *
26 * We avoid flushing the pinned 0, 1 and possibly 2 entries.
27 */
28.globl _tlbia;
29.align 4;
30_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */
32 /* isync */
33_tlbia_1:
34 mts rtlbx, r12
35 nop
36 mts rtlbhi, r0 /* flush: ensure V is clear */
37 nop
38 addik r11, r12, -2
39 bneid r11, _tlbia_1 /* loop for all entries */
40 addik r12, r12, -1
41 /* sync */
42 rtsd r15, 8
43 nop
44
45/*
46 * Flush MMU TLB for a particular address (in r5)
47 */
48.globl _tlbie;
49.align 4;
50_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */
52 nop
53 mfs r12, rtlbx /* Retrieve index */
54 nop
55 blti r12, _tlbie_1 /* Check if found */
56 mts rtlbhi, r0 /* flush: ensure V is clear */
57 nop
58_tlbie_1:
59 rtsd r15, 8
60 nop
61
62/*
63 * Allocate TLB entry for early console
64 */
65.globl early_console_reg_tlb_alloc;
66.align 4;
67early_console_reg_tlb_alloc:
68 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */
72 ori r4, r0, 63
 73 mts rtlbx, r4 /* TLB slot 63 */
74
75 or r4,r5,r0
76 andi r4,r4,0xfffff000
77 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
78
79 andi r5,r5,0xfffff000
80 ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
81
82 mts rtlblo,r4 /* Load the data portion of the entry */
83 nop
84 mts rtlbhi,r5 /* Load the tag portion of the entry */
85 nop
86 rtsd r15, 8
87 nop
88
89/*
90 * Copy a whole page (4096 bytes).
91 */
92#define COPY_16_BYTES \
93 lwi r7, r6, 0; \
94 lwi r8, r6, 4; \
95 lwi r9, r6, 8; \
96 lwi r10, r6, 12; \
97 swi r7, r5, 0; \
98 swi r8, r5, 4; \
99 swi r9, r5, 8; \
100 swi r10, r5, 12
101
102
103/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
104#define DCACHE_LINE_BYTES (4 * 4)
105
106.globl copy_page;
107.align 4;
108copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
110_copy_page_loop:
111 COPY_16_BYTES
112#if DCACHE_LINE_BYTES >= 32
113 COPY_16_BYTES
114#endif
115 addik r6, r6, DCACHE_LINE_BYTES
116 addik r5, r5, DCACHE_LINE_BYTES
117 bneid r11, _copy_page_loop
118 addik r11, r11, -1
119 rtsd r15, 8
120 nop
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 07d4fa339eda..00b12c6d5326 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
126 else 126 else
127 childregs->r1 = ((unsigned long) ti) + THREAD_SIZE; 127 childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
128 128
129#ifndef CONFIG_MMU
129 memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); 130 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
130 ti->cpu_context.r1 = (unsigned long)childregs; 131 ti->cpu_context.r1 = (unsigned long)childregs;
131 ti->cpu_context.msr = (unsigned long)childregs->msr; 132 ti->cpu_context.msr = (unsigned long)childregs->msr;
133#else
134
135 /* if creating a kernel thread then update the current reg (we don't
136 * want to use the parent's value when restoring by POP_STATE) */
137 if (kernel_mode(regs))
138 /* save new current on stack to use POP_STATE */
139 childregs->CURRENT_TASK = (unsigned long)p;
140 /* if returning to user then use the parent's value of this register */
141
142 /* if we're creating a new kernel thread then just zeroing all
143 * the registers. That's OK for a brand new thread.*/
144 /* Pls. note that some of them will be restored in POP_STATE */
145 if (kernel_mode(regs))
146 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
147 /* if this thread is created for fork/vfork/clone, then we want to
148 * restore all the parent's context */
149 /* in addition to the registers which will be restored by POP_STATE */
150 else {
151 ti->cpu_context = *(struct cpu_context *)regs;
152 childregs->msr |= MSR_UMS;
153 }
154
155 /* FIXME STATE_SAVE_PT_OFFSET; */
156 ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE;
157 /* we should consider the fact that childregs is a copy of the parent
158 * regs which were saved immediately after entering the kernel state
159 * before enabling VM. This MSR will be restored in switch_to and
160 * RETURN() and we want to have the right machine state there
161 * specifically this state must have INTs disabled before and enabled
162 * after performing rtbd
163 * compose the right MSR for RETURN(). It will work for switch_to also
164 * excepting for VM and UMS
165 * don't touch UMS , CARRY and cache bits
166 * right now MSR is a copy of parent one */
167 childregs->msr |= MSR_BIP;
168 childregs->msr &= ~MSR_EIP;
169 childregs->msr |= MSR_IE;
170 childregs->msr &= ~MSR_VM;
171 childregs->msr |= MSR_VMS;
172 childregs->msr |= MSR_EE; /* exceptions will be enabled*/
173
174 ti->cpu_context.msr = (childregs->msr|MSR_VM);
175 ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
176#endif
132 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; 177 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
133 178
134 if (clone_flags & CLONE_SETTLS) 179 if (clone_flags & CLONE_SETTLS)
@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
137 return 0; 182 return 0;
138} 183}
139 184
185#ifndef CONFIG_MMU
140/* 186/*
141 * Return saved PC of a blocked thread. 187 * Return saved PC of a blocked thread.
142 */ 188 */
@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
151 else 197 else
152 return ctx->r14; 198 return ctx->r14;
153} 199}
200#endif
154 201
155static void kernel_thread_helper(int (*fn)(void *), void *arg) 202static void kernel_thread_helper(int (*fn)(void *), void *arg)
156{ 203{
@@ -173,6 +220,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
173 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 220 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
174 &regs, 0, NULL, NULL); 221 &regs, 0, NULL, NULL);
175} 222}
223EXPORT_SYMBOL_GPL(kernel_thread);
176 224
177unsigned long get_wchan(struct task_struct *p) 225unsigned long get_wchan(struct task_struct *p)
178{ 226{
@@ -188,3 +236,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
188 regs->r1 = usp; 236 regs->r1 = usp;
189 regs->pt_mode = 0; 237 regs->pt_mode = 0;
190} 238}
239
240#ifdef CONFIG_MMU
241#include <linux/elfcore.h>
242/*
243 * Set up a thread for executing a new program
244 */
245int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
246{
247 return 0; /* MicroBlaze has no separate FPU registers */
248}
249#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 34c48718061a..c005cc6f1aaf 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
509 509
510 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); 510 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
511 if (prop) { 511 if (prop) {
512 initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); 512 initrd_start = (unsigned long)
513 __va((u32)of_read_ulong(prop, l/4));
513 514
514 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); 515 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
515 if (prop) { 516 if (prop) {
516 initrd_end = (unsigned long) 517 initrd_end = (unsigned long)
 517 __va(of_read_ulong(prop, l/4)); 518 __va((u32)of_read_ulong(prop, l/4));
518 initrd_below_start_ok = 1; 519 initrd_below_start_ok = 1;
519 } else { 520 } else {
520 initrd_start = 0; 521 initrd_start = 0;
@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
563 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 564 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
564 565
565#ifdef CONFIG_CMDLINE 566#ifdef CONFIG_CMDLINE
567#ifndef CONFIG_CMDLINE_FORCE
566 if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 568 if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
569#endif
567 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 570 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
568#endif /* CONFIG_CMDLINE */ 571#endif /* CONFIG_CMDLINE */
569 572
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index eb6b41758e23..8709bea09604 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -42,10 +42,6 @@ char cmd_line[COMMAND_LINE_SIZE];
42 42
43void __init setup_arch(char **cmdline_p) 43void __init setup_arch(char **cmdline_p)
44{ 44{
45#ifdef CONFIG_CMDLINE_FORCE
46 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
47 strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
48#endif
49 *cmdline_p = cmd_line; 45 *cmdline_p = cmd_line;
50 46
51 console_verbose(); 47 console_verbose();
@@ -102,14 +98,34 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
102{ 98{
103 unsigned long *src, *dst = (unsigned long *)0x0; 99 unsigned long *src, *dst = (unsigned long *)0x0;
104 100
101 /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
102 * end of kernel. There are two position which we want to check.
103 * The first is __init_end and the second __bss_start.
104 */
105#ifdef CONFIG_MTD_UCLINUX
106 int romfs_size;
107 unsigned int romfs_base;
108 char *old_klimit = klimit;
109
110 romfs_base = (ram ? ram : (unsigned int)&__init_end);
111 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
112 if (!romfs_size) {
113 romfs_base = (unsigned int)&__bss_start;
114 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
115 }
116
117 /* Move ROMFS out of BSS before clearing it */
118 if (romfs_size > 0) {
119 memmove(&_ebss, (int *)romfs_base, romfs_size);
120 klimit += romfs_size;
121 }
122#endif
123
105/* clearing bss section */ 124/* clearing bss section */
106 memset(__bss_start, 0, __bss_stop-__bss_start); 125 memset(__bss_start, 0, __bss_stop-__bss_start);
107 memset(_ssbss, 0, _esbss-_ssbss); 126 memset(_ssbss, 0, _esbss-_ssbss);
108 127
109 /* 128 /* Copy command line passed from bootloader */
110 * Copy command line passed from bootloader, or use default
111 * if none provided, or forced
112 */
113#ifndef CONFIG_CMDLINE_BOOL 129#ifndef CONFIG_CMDLINE_BOOL
114 if (cmdline && cmdline[0] != '\0') 130 if (cmdline && cmdline[0] != '\0')
115 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); 131 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
@@ -126,27 +142,15 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
126 printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); 142 printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
127 143
128#ifdef CONFIG_MTD_UCLINUX 144#ifdef CONFIG_MTD_UCLINUX
129 { 145 early_printk("Found romfs @ 0x%08x (0x%08x)\n",
130 int size; 146 romfs_base, romfs_size);
131 unsigned int romfs_base; 147 early_printk("#### klimit %p ####\n", old_klimit);
132 romfs_base = (ram ? ram : (unsigned int)&__init_end); 148 BUG_ON(romfs_size < 0); /* What else can we do? */
133 /* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the 149
134 * end of kernel, which is ROMFS_LOCATION defined above. */ 150 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
135 size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); 151 romfs_size, romfs_base, (unsigned)&_ebss);
136 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 152
137 romfs_base, size); 153 early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
138 early_printk("#### klimit %p ####\n", klimit);
139 BUG_ON(size < 0); /* What else can we do? */
140
141 /* Use memmove to handle likely case of memory overlap */
142 early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n",
143 size, romfs_base, (unsigned)&_ebss);
144 memmove(&_ebss, (int *)romfs_base, size);
145
146 /* update klimit */
147 klimit += PAGE_ALIGN(size);
148 early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
149 }
150#endif 154#endif
151 155
152 for (src = __ivt_start; src < __ivt_end; src++, dst++) 156 for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 40d36931e363..4c0e6521b114 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -152,8 +152,8 @@ struct rt_sigframe {
152 unsigned long tramp[2]; /* signal trampoline */ 152 unsigned long tramp[2]; /* signal trampoline */
153}; 153};
154 154
155static int 155static int restore_sigcontext(struct pt_regs *regs,
156restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p) 156 struct sigcontext __user *sc, int *rval_p)
157{ 157{
158 unsigned int err = 0; 158 unsigned int err = 0;
159 159
@@ -211,11 +211,10 @@ badframe:
211 211
212asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) 212asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
213{ 213{
214 struct rt_sigframe *frame = 214 struct rt_sigframe __user *frame =
215 (struct rt_sigframe *)(regs->r1 + STATE_SAVE_ARG_SPACE); 215 (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE);
216 216
217 sigset_t set; 217 sigset_t set;
218 stack_t st;
219 int rval; 218 int rval;
220 219
221 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 220 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -233,11 +232,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
233 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) 232 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
234 goto badframe; 233 goto badframe;
235 234
236 if (__copy_from_user((void *)&st, &frame->uc.uc_stack, sizeof(st)))
237 goto badframe;
238 /* It is more difficult to avoid calling this function than to 235 /* It is more difficult to avoid calling this function than to
239 call it and ignore errors. */ 236 call it and ignore errors. */
240 do_sigaltstack(&st, NULL, regs->r1); 237 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
238 goto badframe;
241 239
242 return rval; 240 return rval;
243 241
@@ -251,7 +249,7 @@ badframe:
251 */ 249 */
252 250
253static int 251static int
254setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, 252setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
255 unsigned long mask) 253 unsigned long mask)
256{ 254{
257 int err = 0; 255 int err = 0;
@@ -278,7 +276,7 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
278/* 276/*
279 * Determine which stack to use.. 277 * Determine which stack to use..
280 */ 278 */
281static inline void * 279static inline void __user *
282get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 280get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
283{ 281{
284 /* Default to using normal stack */ 282 /* Default to using normal stack */
@@ -287,87 +285,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
287 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) 285 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
288 sp = current->sas_ss_sp + current->sas_ss_size; 286 sp = current->sas_ss_sp + current->sas_ss_size;
289 287
290 return (void *)((sp - frame_size) & -8UL); 288 return (void __user *)((sp - frame_size) & -8UL);
291}
292
293static void setup_frame(int sig, struct k_sigaction *ka,
294 sigset_t *set, struct pt_regs *regs)
295{
296 struct sigframe *frame;
297 int err = 0;
298 int signal;
299
300 frame = get_sigframe(ka, regs, sizeof(*frame));
301
302 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
303 goto give_sigsegv;
304
305 signal = current_thread_info()->exec_domain
306 && current_thread_info()->exec_domain->signal_invmap
307 && sig < 32
308 ? current_thread_info()->exec_domain->signal_invmap[sig]
309 : sig;
310
311 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
312
313 if (_NSIG_WORDS > 1) {
314 err |= __copy_to_user(frame->extramask, &set->sig[1],
315 sizeof(frame->extramask));
316 }
317
318 /* Set up to return from userspace. If provided, use a stub
319 already in userspace. */
320 /* minus 8 is offset to cater for "rtsd r15,8" offset */
321 if (ka->sa.sa_flags & SA_RESTORER) {
322 regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
323 } else {
324 /* Note, these encodings are _big endian_! */
325
326 /* addi r12, r0, __NR_sigreturn */
327 err |= __put_user(0x31800000 | __NR_sigreturn ,
328 frame->tramp + 0);
329 /* brki r14, 0x8 */
330 err |= __put_user(0xb9cc0008, frame->tramp + 1);
331
332 /* Return from sighandler will jump to the tramp.
333 Negative 8 offset because return is rtsd r15, 8 */
334 regs->r15 = ((unsigned long)frame->tramp)-8;
335
336 __invalidate_cache_sigtramp((unsigned long)frame->tramp);
337 }
338
339 if (err)
340 goto give_sigsegv;
341
342 /* Set up registers for signal handler */
343 regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE;
344
345 /* Signal handler args: */
346 regs->r5 = signal; /* Arg 0: signum */
347 regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */
348
349 /* Offset of 4 to handle microblaze rtid r14, 0 */
350 regs->pc = (unsigned long)ka->sa.sa_handler;
351
352 set_fs(USER_DS);
353
354#ifdef DEBUG_SIG
355 printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
356 current->comm, current->pid, frame, regs->pc);
357#endif
358
359 return;
360
361give_sigsegv:
362 if (sig == SIGSEGV)
363 ka->sa.sa_handler = SIG_DFL;
364 force_sig(SIGSEGV, current);
365} 289}
366 290
367static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 291static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
368 sigset_t *set, struct pt_regs *regs) 292 sigset_t *set, struct pt_regs *regs)
369{ 293{
370 struct rt_sigframe *frame; 294 struct rt_sigframe __user *frame;
371 int err = 0; 295 int err = 0;
372 int signal; 296 int signal;
373 297
@@ -382,7 +306,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
382 ? current_thread_info()->exec_domain->signal_invmap[sig] 306 ? current_thread_info()->exec_domain->signal_invmap[sig]
383 : sig; 307 : sig;
384 308
385 err |= copy_siginfo_to_user(&frame->info, info); 309 if (info)
310 err |= copy_siginfo_to_user(&frame->info, info);
386 311
387 /* Create the ucontext. */ 312 /* Create the ucontext. */
388 err |= __put_user(0, &frame->uc.uc_flags); 313 err |= __put_user(0, &frame->uc.uc_flags);
@@ -463,7 +388,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
463 case -ERESTARTNOINTR: 388 case -ERESTARTNOINTR:
464do_restart: 389do_restart:
465 /* offset of 4 bytes to re-execute trap (brki) instruction */ 390 /* offset of 4 bytes to re-execute trap (brki) instruction */
391#ifndef CONFIG_MMU
466 regs->pc -= 4; 392 regs->pc -= 4;
393#else
394 /* offset of 8 bytes required = 4 for rtbd
395 offset, plus 4 for size of
396 "brki r14,8"
397 instruction. */
398 regs->pc -= 8;
399#endif
467 break; 400 break;
468 } 401 }
469} 402}
@@ -480,7 +413,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
480 if (ka->sa.sa_flags & SA_SIGINFO) 413 if (ka->sa.sa_flags & SA_SIGINFO)
481 setup_rt_frame(sig, ka, info, oldset, regs); 414 setup_rt_frame(sig, ka, info, oldset, regs);
482 else 415 else
483 setup_frame(sig, ka, oldset, regs); 416 setup_rt_frame(sig, ka, NULL, oldset, regs);
484 417
485 if (ka->sa.sa_flags & SA_ONESHOT) 418 if (ka->sa.sa_flags & SA_ONESHOT)
486 ka->sa.sa_handler = SIG_DFL; 419 ka->sa.sa_handler = SIG_DFL;
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 3bb42ec924c2..376d1789f7c0 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -2,7 +2,11 @@ ENTRY(sys_call_table)
2 .long sys_restart_syscall /* 0 - old "setup()" system call, 2 .long sys_restart_syscall /* 0 - old "setup()" system call,
3 * used for restarting */ 3 * used for restarting */
4 .long sys_exit 4 .long sys_exit
5 .long sys_ni_syscall /* was fork */ 5#ifdef CONFIG_MMU
6 .long sys_fork_wrapper
7#else
8 .long sys_ni_syscall
9#endif
6 .long sys_read 10 .long sys_read
7 .long sys_write 11 .long sys_write
8 .long sys_open /* 5 */ 12 .long sys_open /* 5 */
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 293ef486013a..eaaaf805f31b 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,14 +22,6 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25void __bad_xchg(volatile void *ptr, int size)
26{
27 printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
28 __builtin_return_address(0), ptr, size);
29 BUG();
30}
31EXPORT_SYMBOL(__bad_xchg);
32
33static int kstack_depth_to_print = 24; 25static int kstack_depth_to_print = 24;
34 26
35static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
@@ -105,3 +97,37 @@ void dump_stack(void)
105 show_stack(NULL, NULL); 97 show_stack(NULL, NULL);
106} 98}
107EXPORT_SYMBOL(dump_stack); 99EXPORT_SYMBOL(dump_stack);
100
101#ifdef CONFIG_MMU
102void __bug(const char *file, int line, void *data)
103{
104 if (data)
105 printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
106 file, line, data);
107 else
108 printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
109
110 machine_halt();
111}
112
113int bad_trap(int trap_num, struct pt_regs *regs)
114{
115 printk(KERN_CRIT
116 "unimplemented trap %d called at 0x%08lx, pid %d!\n",
117 trap_num, regs->pc, current->pid);
118 return -ENOSYS;
119}
120
121int debug_trap(struct pt_regs *regs)
122{
123 int i;
124 printk(KERN_CRIT "debug trap\n");
125 for (i = 0; i < 32; i++) {
126 /* printk("r%i:%08X\t",i,regs->gpr[i]); */
127 if ((i % 4) == 3)
128 printk(KERN_CRIT "\n");
129 }
130 printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
131 return -ENOSYS;
132}
133#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 840385e51291..8ae807ab7a51 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -17,8 +17,7 @@ ENTRY(_start)
17jiffies = jiffies_64 + 4; 17jiffies = jiffies_64 + 4;
18 18
19SECTIONS { 19SECTIONS {
20 . = CONFIG_KERNEL_BASE_ADDR; 20 . = CONFIG_KERNEL_START;
21
22 .text : { 21 .text : {
23 _text = . ; 22 _text = . ;
24 _stext = . ; 23 _stext = . ;
@@ -132,6 +131,8 @@ SECTIONS {
132 __con_initcall_end = .; 131 __con_initcall_end = .;
133 } 132 }
134 133
134 SECURITY_INIT
135
135 __init_end_before_initramfs = .; 136 __init_end_before_initramfs = .;
136 137
137 .init.ramfs ALIGN(4096) : { 138 .init.ramfs ALIGN(4096) : {
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index d27126bf306a..71c8cb6c9e43 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,4 +10,5 @@ else
10lib-y += memcpy.o memmove.o 10lib-y += memcpy.o memmove.o
11endif 11endif
12 12
13lib-y += uaccess.o 13lib-$(CONFIG_NO_MMU) += uaccess.o
14lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/lib/checksum.c b/arch/microblaze/lib/checksum.c
index 809340070a13..f08e74591418 100644
--- a/arch/microblaze/lib/checksum.c
+++ b/arch/microblaze/lib/checksum.c
@@ -32,9 +32,10 @@
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access 32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */ 33 kills, so most of the assembly has to go. */
34 34
35#include <net/checksum.h>
36#include <asm/checksum.h>
37#include <linux/module.h> 35#include <linux/module.h>
36#include <net/checksum.h>
37
38#include <asm/byteorder.h>
38 39
39static inline unsigned short from32to16(unsigned long x) 40static inline unsigned short from32to16(unsigned long x)
40{ 41{
@@ -102,6 +103,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
102{ 103{
103 return (__force __sum16)~do_csum(iph, ihl*4); 104 return (__force __sum16)~do_csum(iph, ihl*4);
104} 105}
106EXPORT_SYMBOL(ip_fast_csum);
105 107
106/* 108/*
107 * computes the checksum of a memory block at buff, length len, 109 * computes the checksum of a memory block at buff, length len,
@@ -115,15 +117,16 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
115 * 117 *
116 * it's best to have buff aligned on a 32-bit boundary 118 * it's best to have buff aligned on a 32-bit boundary
117 */ 119 */
118__wsum csum_partial(const void *buff, int len, __wsum sum) 120__wsum csum_partial(const void *buff, int len, __wsum wsum)
119{ 121{
122 unsigned int sum = (__force unsigned int)wsum;
120 unsigned int result = do_csum(buff, len); 123 unsigned int result = do_csum(buff, len);
121 124
122 /* add in old sum, and carry.. */ 125 /* add in old sum, and carry.. */
123 result += sum; 126 result += sum;
124 if (sum > result) 127 if (sum > result)
125 result += 1; 128 result += 1;
126 return result; 129 return (__force __wsum)result;
127} 130}
128EXPORT_SYMBOL(csum_partial); 131EXPORT_SYMBOL(csum_partial);
129 132
@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial);
131 * this routine is used for miscellaneous IP-like checksums, mainly 134 * this routine is used for miscellaneous IP-like checksums, mainly
132 * in icmp.c 135 * in icmp.c
133 */ 136 */
134__sum16 ip_compute_csum(const unsigned char *buff, int len) 137__sum16 ip_compute_csum(const void *buff, int len)
135{ 138{
136 return ~do_csum(buff, len); 139 return (__force __sum16)~do_csum(buff, len);
137} 140}
138EXPORT_SYMBOL(ip_compute_csum); 141EXPORT_SYMBOL(ip_compute_csum);
139 142
@@ -141,12 +144,18 @@ EXPORT_SYMBOL(ip_compute_csum);
141 * copy from fs while checksumming, otherwise like csum_partial 144 * copy from fs while checksumming, otherwise like csum_partial
142 */ 145 */
143__wsum 146__wsum
144csum_partial_copy_from_user(const char __user *src, char *dst, int len, 147csum_partial_copy_from_user(const void __user *src, void *dst, int len,
145 int sum, int *csum_err) 148 __wsum sum, int *csum_err)
146{ 149{
147 if (csum_err) 150 int missing;
151
152 missing = __copy_from_user(dst, src, len);
153 if (missing) {
154 memset(dst + len - missing, 0, missing);
155 *csum_err = -EFAULT;
156 } else
148 *csum_err = 0; 157 *csum_err = 0;
149 memcpy(dst, src, len); 158
150 return csum_partial(dst, len, sum); 159 return csum_partial(dst, len, sum);
151} 160}
152EXPORT_SYMBOL(csum_partial_copy_from_user); 161EXPORT_SYMBOL(csum_partial_copy_from_user);
@@ -155,7 +164,7 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
155 * copy from ds while checksumming, otherwise like csum_partial 164 * copy from ds while checksumming, otherwise like csum_partial
156 */ 165 */
157__wsum 166__wsum
158csum_partial_copy(const char *src, char *dst, int len, int sum) 167csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
159{ 168{
160 memcpy(dst, src, len); 169 memcpy(dst, src, len);
161 return csum_partial(dst, len, sum); 170 return csum_partial(dst, len, sum);
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index 5880119c4487..6a907c58a4bc 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -154,8 +154,3 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
154} 154}
155EXPORT_SYMBOL(memcpy); 155EXPORT_SYMBOL(memcpy);
156#endif /* __HAVE_ARCH_MEMCPY */ 156#endif /* __HAVE_ARCH_MEMCPY */
157
158void *cacheable_memcpy(void *d, const void *s, __kernel_size_t c)
159{
160 return memcpy(d, s, c);
161}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
new file mode 100644
index 000000000000..67f991c14b8a
--- /dev/null
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2009 PetaLogix
4 * Copyright (C) 2007 LynuxWorks, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/errno.h>
12#include <linux/linkage.h>
13
14/*
15 * int __strncpy_user(char *to, char *from, int len);
16 *
17 * Returns:
18 * -EFAULT for an exception
19 * len if we hit the buffer limit
20 * bytes copied
21 */
22
23 .text
24.globl __strncpy_user;
25.align 4;
26__strncpy_user:
27
28 /*
29 * r5 - to
30 * r6 - from
31 * r7 - len
32 * r3 - temp count
33 * r4 - temp val
34 */
35 addik r3,r7,0 /* temp_count = len */
36 beqi r3,3f
371:
38 lbu r4,r6,r0
39 sb r4,r5,r0
40
41 addik r3,r3,-1
42 beqi r3,2f /* break on len */
43
44 addik r5,r5,1
45 bneid r4,1b
46 addik r6,r6,1 /* delay slot */
47 addik r3,r3,1 /* undo "temp_count--" */
482:
49 rsubk r3,r3,r7 /* temp_count = len - temp_count */
503:
51 rtsd r15,8
52 nop
53
54
55 .section .fixup, "ax"
56 .align 2
574:
58 brid 3b
59 addik r3,r0, -EFAULT
60
61 .section __ex_table, "a"
62 .word 1b,4b
63
64/*
65 * int __strnlen_user(char __user *str, int maxlen);
66 *
67 * Returns:
68 * 0 on error
69 * maxlen + 1 if no NUL byte found within maxlen bytes
70 * size of the string (including NUL byte)
71 */
72
73 .text
74.globl __strnlen_user;
75.align 4;
76__strnlen_user:
77 addik r3,r6,0
78 beqi r3,3f
791:
80 lbu r4,r5,r0
81 beqid r4,2f /* break on NUL */
82 addik r3,r3,-1 /* delay slot */
83
84 bneid r3,1b
85 addik r5,r5,1 /* delay slot */
86
87 addik r3,r3,-1 /* for break on len */
882:
89 rsubk r3,r3,r6
903:
91 rtsd r15,8
92 nop
93
94
95 .section .fixup,"ax"
964:
97 brid 3b
98 addk r3,r0,r0
99
100 .section __ex_table,"a"
101 .word 1b,4b
102
103/*
104 * int __copy_tofrom_user(char *to, char *from, int len)
105 * Return:
106 * 0 on success
107 * number of not copied bytes on error
108 */
109 .text
110.globl __copy_tofrom_user;
111.align 4;
112__copy_tofrom_user:
113 /*
114 * r5 - to
115 * r6 - from
116 * r7, r3 - count
117 * r4 - tempval
118 */
119 addik r3,r7,0
120 beqi r3,3f
1211:
122 lbu r4,r6,r0
123 addik r6,r6,1
1242:
125 sb r4,r5,r0
126 addik r3,r3,-1
127 bneid r3,1b
128 addik r5,r5,1 /* delay slot */
1293:
130 rtsd r15,8
131 nop
132
133
134 .section __ex_table,"a"
135 .word 1b,3b,2b,3b
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index bf9e4479a1fd..6c8a924d9e26 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-y := init.o 5obj-y := init.o
6
7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
new file mode 100644
index 000000000000..5e67cd1fab40
--- /dev/null
+++ b/arch/microblaze/mm/fault.c
@@ -0,0 +1,304 @@
1/*
2 * arch/microblaze/mm/fault.c
3 *
4 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
5 *
6 * Derived from "arch/ppc/mm/fault.c"
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Derived from "arch/i386/mm/fault.c"
10 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
11 *
12 * Modified by Cort Dougan and Paul Mackerras.
13 *
14 * This file is subject to the terms and conditions of the GNU General
15 * Public License. See the file COPYING in the main directory of this
16 * archive for more details.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/types.h>
27#include <linux/ptrace.h>
28#include <linux/mman.h>
29#include <linux/mm.h>
30#include <linux/interrupt.h>
31
32#include <asm/page.h>
33#include <asm/pgtable.h>
34#include <asm/mmu.h>
35#include <asm/mmu_context.h>
36#include <asm/system.h>
37#include <linux/uaccess.h>
38#include <asm/exceptions.h>
39
40#if defined(CONFIG_KGDB)
41int debugger_kernel_faults = 1;
42#endif
43
44static unsigned long pte_misses; /* updated by do_page_fault() */
45static unsigned long pte_errors; /* updated by do_page_fault() */
46
47/*
48 * Check whether the instruction at regs->pc is a store using
49 * an update addressing form which will update r1.
50 */
51static int store_updates_sp(struct pt_regs *regs)
52{
53 unsigned int inst;
54
55 if (get_user(inst, (unsigned int *)regs->pc))
56 return 0;
57 /* check for 1 in the rD field */
58 if (((inst >> 21) & 0x1f) != 1)
59 return 0;
60 /* check for store opcodes */
61 if ((inst & 0xd0000000) == 0xd0000000)
62 return 1;
63 return 0;
64}
65
66
67/*
68 * bad_page_fault is called when we have a bad access from the kernel.
69 * It is called from do_page_fault above and from some of the procedures
70 * in traps.c.
71 */
72static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
73{
74 const struct exception_table_entry *fixup;
75/* MS: no context */
76 /* Are we prepared to handle this fault? */
77 fixup = search_exception_tables(regs->pc);
78 if (fixup) {
79 regs->pc = fixup->fixup;
80 return;
81 }
82
83 /* kernel has accessed a bad area */
84#if defined(CONFIG_KGDB)
85 if (debugger_kernel_faults)
86 debugger(regs);
87#endif
88 die("kernel access of bad area", regs, sig);
89}
90
91/*
92 * The error_code parameter is ESR for a data fault,
93 * 0 for an instruction fault.
94 */
95void do_page_fault(struct pt_regs *regs, unsigned long address,
96 unsigned long error_code)
97{
98 struct vm_area_struct *vma;
99 struct mm_struct *mm = current->mm;
100 siginfo_t info;
101 int code = SEGV_MAPERR;
102 int is_write = error_code & ESR_S;
103 int fault;
104
105 regs->ear = address;
106 regs->esr = error_code;
107
108 /* On a kernel SLB miss we can only check for a valid exception entry */
109 if (kernel_mode(regs) && (address >= TASK_SIZE)) {
110 printk(KERN_WARNING "kernel task_size exceed");
111 _exception(SIGSEGV, regs, code, address);
112 }
113
114 /* for instr TLB miss and instr storage exception ESR_S is undefined */
115 if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
116 is_write = 0;
117
118#if defined(CONFIG_KGDB)
119 if (debugger_fault_handler && regs->trap == 0x300) {
120 debugger_fault_handler(regs);
121 return;
122 }
123#endif /* CONFIG_KGDB */
124
125 if (in_atomic() || mm == NULL) {
126 /* FIXME */
127 if (kernel_mode(regs)) {
128 printk(KERN_EMERG
129 "Page fault in kernel mode - Oooou!!! pid %d\n",
130 current->pid);
131 _exception(SIGSEGV, regs, code, address);
132 return;
133 }
134 /* in_atomic() in user mode is really bad,
135 as is current->mm == NULL. */
136 printk(KERN_EMERG "Page fault in user mode with "
137 "in_atomic(), mm = %p\n", mm);
138 printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
139 regs->r15, regs->msr);
140 die("Weird page fault", regs, SIGSEGV);
141 }
142
143 /* When running in the kernel we expect faults to occur only to
144 * addresses in user space. All other faults represent errors in the
145 * kernel and should generate an OOPS. Unfortunately, in the case of an
146 * erroneous fault occurring in a code path which already holds mmap_sem
147 * we will deadlock attempting to validate the fault against the
148 * address space. Luckily the kernel only validly references user
149 * space from well defined areas of code, which are listed in the
150 * exceptions table.
151 *
152 * As the vast majority of faults will be valid we will only perform
153 * the source reference check when there is a possibility of a deadlock.
154 * Attempt to lock the address space, if we cannot we then validate the
155 * source. If this is invalid we can skip the address space check,
156 * thus avoiding the deadlock.
157 */
158 if (!down_read_trylock(&mm->mmap_sem)) {
159 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
160 goto bad_area_nosemaphore;
161
162 down_read(&mm->mmap_sem);
163 }
164
165 vma = find_vma(mm, address);
166 if (!vma)
167 goto bad_area;
168
169 if (vma->vm_start <= address)
170 goto good_area;
171
172 if (!(vma->vm_flags & VM_GROWSDOWN))
173 goto bad_area;
174
175 if (!is_write)
176 goto bad_area;
177
178 /*
179 * N.B. The ABI allows programs to access up to
180 * a few hundred bytes below the stack pointer (TBD).
181 * The kernel signal delivery code writes up to about 1.5kB
182 * below the stack pointer (r1) before decrementing it.
183 * The exec code can write slightly over 640kB to the stack
184 * before setting the user r1. Thus we allow the stack to
185 * expand to 1MB without further checks.
186 */
187 if (address + 0x100000 < vma->vm_end) {
188
189 /* get user regs even if this fault is in kernel mode */
190 struct pt_regs *uregs = current->thread.regs;
191 if (uregs == NULL)
192 goto bad_area;
193
194 /*
195 * A user-mode access to an address a long way below
196 * the stack pointer is only valid if the instruction
197 * is one which would update the stack pointer to the
198 * address accessed if the instruction completed,
199 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
200 * (or the byte, halfword, float or double forms).
201 *
202 * If we don't check this then any write to the area
203 * between the last mapped region and the stack will
204 * expand the stack rather than segfaulting.
205 */
206 if (address + 2048 < uregs->r1
207 && (kernel_mode(regs) || !store_updates_sp(regs)))
208 goto bad_area;
209 }
210 if (expand_stack(vma, address))
211 goto bad_area;
212
213good_area:
214 code = SEGV_ACCERR;
215
216 /* a write */
217 if (is_write) {
218 if (!(vma->vm_flags & VM_WRITE))
219 goto bad_area;
220 /* a read */
221 } else {
222 /* protection fault */
223 if (error_code & 0x08000000)
224 goto bad_area;
225 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
226 goto bad_area;
227 }
228
229 /*
230 * If for any reason at all we couldn't handle the fault,
231 * make sure we exit gracefully rather than endlessly redo
232 * the fault.
233 */
234survive:
235 fault = handle_mm_fault(mm, vma, address, is_write);
236 if (unlikely(fault & VM_FAULT_ERROR)) {
237 if (fault & VM_FAULT_OOM)
238 goto out_of_memory;
239 else if (fault & VM_FAULT_SIGBUS)
240 goto do_sigbus;
241 BUG();
242 }
243 if (fault & VM_FAULT_MAJOR)
244 current->maj_flt++;
245 else
246 current->min_flt++;
247 up_read(&mm->mmap_sem);
248 /*
249 * keep track of tlb+htab misses that are good addrs but
250 * just need pte's created via handle_mm_fault()
251 * -- Cort
252 */
253 pte_misses++;
254 return;
255
256bad_area:
257 up_read(&mm->mmap_sem);
258
259bad_area_nosemaphore:
260 pte_errors++;
261
262 /* User mode accesses cause a SIGSEGV */
263 if (user_mode(regs)) {
264 _exception(SIGSEGV, regs, code, address);
265/* info.si_signo = SIGSEGV;
266 info.si_errno = 0;
267 info.si_code = code;
268 info.si_addr = (void *) address;
269 force_sig_info(SIGSEGV, &info, current);*/
270 return;
271 }
272
273 bad_page_fault(regs, address, SIGSEGV);
274 return;
275
276/*
277 * We ran out of memory, or some other thing happened to us that made
278 * us unable to handle the page fault gracefully.
279 */
280out_of_memory:
281 if (current->pid == 1) {
282 yield();
283 down_read(&mm->mmap_sem);
284 goto survive;
285 }
286 up_read(&mm->mmap_sem);
287 printk(KERN_WARNING "VM: killing process %s\n", current->comm);
288 if (user_mode(regs))
289 do_exit(SIGKILL);
290 bad_page_fault(regs, address, SIGKILL);
291 return;
292
293do_sigbus:
294 up_read(&mm->mmap_sem);
295 if (user_mode(regs)) {
296 info.si_signo = SIGBUS;
297 info.si_errno = 0;
298 info.si_code = BUS_ADRERR;
299 info.si_addr = (void __user *)address;
300 force_sig_info(SIGBUS, &info, current);
301 return;
302 }
303 bad_page_fault(regs, address, SIGBUS);
304}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index b0c8213cd6cf..b5a701cd71e0 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,8 +23,16 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/tlb.h> 24#include <asm/tlb.h>
25 25
26#ifndef CONFIG_MMU
26unsigned int __page_offset; 27unsigned int __page_offset;
27/* EXPORT_SYMBOL(__page_offset); */ 28EXPORT_SYMBOL(__page_offset);
29
30#else
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32
33int mem_init_done;
34static int init_bootmem_done;
35#endif /* CONFIG_MMU */
28 36
29char *klimit = _end; 37char *klimit = _end;
30 38
@@ -32,28 +40,26 @@ char *klimit = _end;
32 * Initialize the bootmem system and give it all the memory we 40 * Initialize the bootmem system and give it all the memory we
33 * have available. 41 * have available.
34 */ 42 */
35unsigned int memory_start; 43unsigned long memory_start;
36unsigned int memory_end; /* due to mm/nommu.c */ 44unsigned long memory_end; /* due to mm/nommu.c */
37unsigned int memory_size; 45unsigned long memory_size;
38 46
39/* 47/*
40 * paging_init() sets up the page tables - in fact we've already done this. 48 * paging_init() sets up the page tables - in fact we've already done this.
41 */ 49 */
42static void __init paging_init(void) 50static void __init paging_init(void)
43{ 51{
44 int i;
45 unsigned long zones_size[MAX_NR_ZONES]; 52 unsigned long zones_size[MAX_NR_ZONES];
46 53
54 /* Clean every zones */
55 memset(zones_size, 0, sizeof(zones_size));
56
47 /* 57 /*
48 * old: we can DMA to/from any address.put all page into ZONE_DMA 58 * old: we can DMA to/from any address.put all page into ZONE_DMA
49 * We use only ZONE_NORMAL 59 * We use only ZONE_NORMAL
50 */ 60 */
51 zones_size[ZONE_NORMAL] = max_mapnr; 61 zones_size[ZONE_NORMAL] = max_mapnr;
52 62
53 /* every other zones are empty */
54 for (i = 1; i < MAX_NR_ZONES; i++)
55 zones_size[i] = 0;
56
57 free_area_init(zones_size); 63 free_area_init(zones_size);
58} 64}
59 65
@@ -61,6 +67,7 @@ void __init setup_memory(void)
61{ 67{
62 int i; 68 int i;
63 unsigned long map_size; 69 unsigned long map_size;
70#ifndef CONFIG_MMU
64 u32 kernel_align_start, kernel_align_size; 71 u32 kernel_align_start, kernel_align_size;
65 72
66 /* Find main memory where is the kernel */ 73 /* Find main memory where is the kernel */
@@ -93,6 +100,7 @@ void __init setup_memory(void)
93 __func__, kernel_align_start, kernel_align_start 100 __func__, kernel_align_start, kernel_align_start
94 + kernel_align_size, kernel_align_size); 101 + kernel_align_size, kernel_align_size);
95 102
103#endif
96 /* 104 /*
97 * Kernel: 105 * Kernel:
98 * start: base phys address of kernel - page align 106 * start: base phys address of kernel - page align
@@ -121,9 +129,13 @@ void __init setup_memory(void)
121 * for 4GB of memory, using 4kB pages), plus 1 page 129 * for 4GB of memory, using 4kB pages), plus 1 page
122 * (in case the address isn't page-aligned). 130 * (in case the address isn't page-aligned).
123 */ 131 */
132#ifndef CONFIG_MMU
124 map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)), 133 map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
125 min_low_pfn, max_low_pfn); 134 min_low_pfn, max_low_pfn);
126 135#else
136 map_size = init_bootmem_node(&contig_page_data,
137 PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn);
138#endif
127 lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size); 139 lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);
128 140
129 /* free bootmem is whole main memory */ 141 /* free bootmem is whole main memory */
@@ -137,6 +149,9 @@ void __init setup_memory(void)
137 reserve_bootmem(lmb.reserved.region[i].base, 149 reserve_bootmem(lmb.reserved.region[i].base,
138 lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT); 150 lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
139 } 151 }
152#ifdef CONFIG_MMU
153 init_bootmem_done = 1;
154#endif
140 paging_init(); 155 paging_init();
141} 156}
142 157
@@ -191,11 +206,145 @@ void __init mem_init(void)
191 printk(KERN_INFO "Memory: %luk/%luk available\n", 206 printk(KERN_INFO "Memory: %luk/%luk available\n",
192 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), 207 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
193 num_physpages << (PAGE_SHIFT-10)); 208 num_physpages << (PAGE_SHIFT-10));
209#ifdef CONFIG_MMU
210 mem_init_done = 1;
211#endif
194} 212}
195 213
214#ifndef CONFIG_MMU
196/* Check against bounds of physical memory */ 215/* Check against bounds of physical memory */
197int ___range_ok(unsigned long addr, unsigned long size) 216int ___range_ok(unsigned long addr, unsigned long size)
198{ 217{
199 return ((addr < memory_start) || 218 return ((addr < memory_start) ||
200 ((addr + size) > memory_end)); 219 ((addr + size) > memory_end));
201} 220}
221EXPORT_SYMBOL(___range_ok);
222
223#else
224int page_is_ram(unsigned long pfn)
225{
226 return pfn < max_low_pfn;
227}
228
229/*
230 * Check for command-line options that affect what MMU_init will do.
231 */
232static void mm_cmdline_setup(void)
233{
234 unsigned long maxmem = 0;
235 char *p = cmd_line;
236
237 /* Look for mem= option on command line */
238 p = strstr(cmd_line, "mem=");
239 if (p) {
240 p += 4;
241 maxmem = memparse(p, &p);
242 if (maxmem && memory_size > maxmem) {
243 memory_size = maxmem;
244 memory_end = memory_start + memory_size;
245 lmb.memory.region[0].size = memory_size;
246 }
247 }
248}
249
250/*
251 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
252 */
253static void __init mmu_init_hw(void)
254{
255 /*
256 * The Zone Protection Register (ZPR) defines how protection will
257 * be applied to every page which is a member of a given zone. At
258 * present, we utilize only two of the zones.
259 * The zone index bits (of ZSEL) in the PTE are used for software
260 * indicators, except the LSB. For user access, zone 1 is used,
261 * for kernel access, zone 0 is used. We set all but zone 1
262 * to zero, allowing only kernel access as indicated in the PTE.
263 * For zone 1, we set a 01 binary (a value of 10 will not work)
264 * to allow user access as indicated in the PTE. This also allows
265 * kernel access as indicated in the PTE.
266 */
267 __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
268 "mts rzpr, r11;"
269 : : : "r11");
270}
271
272/*
273 * MMU_init sets up the basic memory mappings for the kernel,
274 * including both RAM and possibly some I/O regions,
275 * and sets up the page tables and the MMU hardware ready to go.
276 */
277
278/* called from head.S */
279asmlinkage void __init mmu_init(void)
280{
281 unsigned int kstart, ksize;
282
283 if (!lmb.reserved.cnt) {
284 printk(KERN_EMERG "Error memory count\n");
285 machine_restart(NULL);
286 }
287
288 if ((u32) lmb.memory.region[0].size < 0x1000000) {
289 printk(KERN_EMERG "Memory must be greater than 16MB\n");
290 machine_restart(NULL);
291 }
292 /* Find main memory where the kernel is */
293 memory_start = (u32) lmb.memory.region[0].base;
294 memory_end = (u32) lmb.memory.region[0].base +
295 (u32) lmb.memory.region[0].size;
296 memory_size = memory_end - memory_start;
297
298 mm_cmdline_setup(); /* FIXME parse args from command line - not used */
299
300 /*
301 * Map out the kernel text/data/bss from the available physical
302 * memory.
303 */
304 kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
305 /* kernel size */
306 ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
307 lmb_reserve(kstart, ksize);
308
309#if defined(CONFIG_BLK_DEV_INITRD)
310 /* Remove the init RAM disk from the available memory. */
311/* if (initrd_start) {
312 mem_pieces_remove(&phys_avail, __pa(initrd_start),
313 initrd_end - initrd_start, 1);
314 }*/
315#endif /* CONFIG_BLK_DEV_INITRD */
316
317 /* Initialize the MMU hardware */
318 mmu_init_hw();
319
320 /* Map in all of RAM starting at CONFIG_KERNEL_START */
321 mapin_ram();
322
323#ifdef HIGHMEM_START_BOOL
324 ioremap_base = HIGHMEM_START;
325#else
326 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
327#endif /* CONFIG_HIGHMEM */
328 ioremap_bot = ioremap_base;
329
330 /* Initialize the context management stuff */
331 mmu_context_init();
332}
333
334/* This is only called until mem_init is done. */
335void __init *early_get_page(void)
336{
337 void *p;
338 if (init_bootmem_done) {
339 p = alloc_bootmem_pages(PAGE_SIZE);
340 } else {
341 /*
342 * Mem start + 32MB -> here is limit
343 * because of mem mapping from head.S
344 */
345 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
346 memory_start + 0x2000000));
347 }
348 return p;
349}
350#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c
new file mode 100644
index 000000000000..26ff82f4fa8f
--- /dev/null
+++ b/arch/microblaze/mm/mmu_context.c
@@ -0,0 +1,70 @@
1/*
2 * This file contains the routines for handling the MMU.
3 *
4 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
5 *
6 * Derived from arch/ppc/mm/4xx_mmu.c:
7 * -- paulus
8 *
9 * Derived from arch/ppc/mm/init.c:
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
13 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
14 * Copyright (C) 1996 Paul Mackerras
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 *
17 * Derived from "arch/i386/mm/init.c"
18 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 *
25 */
26
27#include <linux/mm.h>
28#include <linux/init.h>
29
30#include <asm/tlbflush.h>
31#include <asm/mmu_context.h>
32
33mm_context_t next_mmu_context;
34unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
35atomic_t nr_free_contexts;
36struct mm_struct *context_mm[LAST_CONTEXT+1];
37
38/*
39 * Initialize the context management stuff.
40 */
41void __init mmu_context_init(void)
42{
43 /*
44 * The use of context zero is reserved for the kernel.
45 * This code assumes FIRST_CONTEXT < 32.
46 */
47 context_map[0] = (1 << FIRST_CONTEXT) - 1;
48 next_mmu_context = FIRST_CONTEXT;
49 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
50}
51
52/*
53 * Steal a context from a task that has one at the moment.
54 *
55 * This isn't an LRU system, it just frees up each context in
56 * turn (sort-of pseudo-random replacement :). This would be the
57 * place to implement an LRU scheme if anyone were motivated to do it.
58 */
59void steal_context(void)
60{
61 struct mm_struct *mm;
62
63 /* free up context `next_mmu_context' */
64 /* if we shouldn't free context 0, don't... */
65 if (next_mmu_context < FIRST_CONTEXT)
66 next_mmu_context = FIRST_CONTEXT;
67 mm = context_mm[next_mmu_context];
68 flush_tlb_mm(mm);
69 destroy_context(mm);
70}
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
new file mode 100644
index 000000000000..46c4ca5d15c5
--- /dev/null
+++ b/arch/microblaze/mm/pgtable.c
@@ -0,0 +1,286 @@
1/*
2 * This file contains the routines setting up the linux page tables.
3 *
4 * Copyright (C) 2008 Michal Simek
5 * Copyright (C) 2008 PetaLogix
6 *
7 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
8 *
9 * Derived from arch/ppc/mm/pgtable.c:
10 * -- paulus
11 *
12 * Derived from arch/ppc/mm/init.c:
13 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
14 *
15 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
16 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
17 * Copyright (C) 1996 Paul Mackerras
18 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
19 *
20 * Derived from "arch/i386/mm/init.c"
21 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
22 *
23 * This file is subject to the terms and conditions of the GNU General
24 * Public License. See the file COPYING in the main directory of this
25 * archive for more details.
26 *
27 */
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/vmalloc.h>
33#include <linux/init.h>
34
35#include <asm/pgtable.h>
36#include <asm/pgalloc.h>
37#include <linux/io.h>
38#include <asm/mmu.h>
39#include <asm/sections.h>
40
41#define flush_HPTE(X, va, pg) _tlbie(va)
42
43unsigned long ioremap_base;
44unsigned long ioremap_bot;
45
46/* The maximum lowmem defaults to 768Mb, but this can be configured to
47 * another value.
48 */
49#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
50
51#ifndef CONFIG_SMP
52struct pgtable_cache_struct quicklists;
53#endif
54
55static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
56 unsigned long flags)
57{
58 unsigned long v, i;
59 phys_addr_t p;
60 int err;
61
62 /*
63 * Choose an address to map it to.
64 * Once the vmalloc system is running, we use it.
65 * Before then, we use space going down from ioremap_base
66 * (ioremap_bot records where we're up to).
67 */
68 p = addr & PAGE_MASK;
69 size = PAGE_ALIGN(addr + size) - p;
70
71 /*
72 * Don't allow anybody to remap normal RAM that we're using.
73 * mem_init() sets high_memory so only do the check after that.
74 *
75 * However, allow remap of rootfs: TBD
76 */
77 if (mem_init_done &&
78 p >= memory_start && p < virt_to_phys(high_memory) &&
79 !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
80 p < virt_to_phys((unsigned long)__bss_stop))) {
81 printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
82 " is RAM lr %p\n", (unsigned long)p,
83 __builtin_return_address(0));
84 return NULL;
85 }
86
87 if (size == 0)
88 return NULL;
89
90 /*
91 * Is it already mapped? If the whole area is mapped then we're
92 * done, otherwise remap it since we want to keep the virt addrs for
93 * each request contiguous.
94 *
95 * We make the assumption here that if the bottom and top
96 * of the range we want are mapped then it's mapped to the
97 * same virt address (and this is contiguous).
98 * -- Cort
99 */
100
101 if (mem_init_done) {
102 struct vm_struct *area;
103 area = get_vm_area(size, VM_IOREMAP);
104 if (area == NULL)
105 return NULL;
106 v = VMALLOC_VMADDR(area->addr);
107 } else {
108 v = (ioremap_bot -= size);
109 }
110
111 if ((flags & _PAGE_PRESENT) == 0)
112 flags |= _PAGE_KERNEL;
113 if (flags & _PAGE_NO_CACHE)
114 flags |= _PAGE_GUARDED;
115
116 err = 0;
117 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
118 err = map_page(v + i, p + i, flags);
119 if (err) {
120 if (mem_init_done)
121 vfree((void *)v);
122 return NULL;
123 }
124
125 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
126}
127
128void __iomem *ioremap(phys_addr_t addr, unsigned long size)
129{
130 return __ioremap(addr, size, _PAGE_NO_CACHE);
131}
132EXPORT_SYMBOL(ioremap);
133
134void iounmap(void *addr)
135{
136 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
137 vfree((void *) (PAGE_MASK & (unsigned long) addr));
138}
139EXPORT_SYMBOL(iounmap);
140
141
142int map_page(unsigned long va, phys_addr_t pa, int flags)
143{
144 pmd_t *pd;
145 pte_t *pg;
146 int err = -ENOMEM;
147 /* spin_lock(&init_mm.page_table_lock); */
148 /* Use upper 10 bits of VA to index the first level map */
149 pd = pmd_offset(pgd_offset_k(va), va);
150 /* Use middle 10 bits of VA to index the second-level map */
151 pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
152 /* pg = pte_alloc_kernel(&init_mm, pd, va); */
153
154 if (pg != NULL) {
155 err = 0;
156 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
157 __pgprot(flags)));
158 if (mem_init_done)
159 flush_HPTE(0, va, pmd_val(*pd));
160 /* flush_HPTE(0, va, pg); */
161
162 }
163 /* spin_unlock(&init_mm.page_table_lock); */
164 return err;
165}
166
167void __init adjust_total_lowmem(void)
168{
169/* TBD */
170#if 0
171 unsigned long max_low_mem = MAX_LOW_MEM;
172
173 if (total_lowmem > max_low_mem) {
174 total_lowmem = max_low_mem;
175#ifndef CONFIG_HIGHMEM
176 printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
177 "CONFIG_HIGHMEM to reach %ld Mb\n",
178 max_low_mem >> 20, total_memory >> 20);
179 total_memory = total_lowmem;
180#endif /* CONFIG_HIGHMEM */
181 }
182#endif
183}
184
185static void show_tmem(unsigned long tmem)
186{
187 volatile unsigned long a;
188 a = a + tmem;
189}
190
191/*
192 * Map in all of physical memory starting at CONFIG_KERNEL_START.
193 */
194void __init mapin_ram(void)
195{
196 unsigned long v, p, s, f;
197
198 v = CONFIG_KERNEL_START;
199 p = memory_start;
200 show_tmem(memory_size);
201 for (s = 0; s < memory_size; s += PAGE_SIZE) {
202 f = _PAGE_PRESENT | _PAGE_ACCESSED |
203 _PAGE_SHARED | _PAGE_HWEXEC;
204 if ((char *) v < _stext || (char *) v >= _etext)
205 f |= _PAGE_WRENABLE;
206 else
207 /* On the MicroBlaze, no user access
208 forces R/W kernel access */
209 f |= _PAGE_USER;
210 map_page(v, p, f);
211 v += PAGE_SIZE;
212 p += PAGE_SIZE;
213 }
214}
215
216/* is x a power of 2? */
217#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
218
219/*
220 * Set up a mapping for a block of I/O.
221 * virt, phys, size must all be page-aligned.
222 * This should only be called before ioremap is called.
223 */
224void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
225 unsigned int size, int flags)
226{
227 int i;
228
229 if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
230 ioremap_bot = ioremap_base = virt;
231
232 /* Put it in the page tables. */
233 for (i = 0; i < size; i += PAGE_SIZE)
234 map_page(virt + i, phys + i, flags);
235}
236
237/* Scan the real Linux page tables and return a PTE pointer for
238 * a virtual address in a context.
239 * Returns true (1) if PTE was found, zero otherwise. The pointer to
240 * the PTE pointer is unmodified if PTE is not found.
241 */
242static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
243{
244 pgd_t *pgd;
245 pmd_t *pmd;
246 pte_t *pte;
247 int retval = 0;
248
249 pgd = pgd_offset(mm, addr & PAGE_MASK);
250 if (pgd) {
251 pmd = pmd_offset(pgd, addr & PAGE_MASK);
252 if (pmd_present(*pmd)) {
253 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
254 if (pte) {
255 retval = 1;
256 *ptep = pte;
257 }
258 }
259 }
260 return retval;
261}
262
263/* Find physical address for this virtual address. Normally used by
264 * I/O functions, but anyone can call it.
265 */
266unsigned long iopa(unsigned long addr)
267{
268 unsigned long pa;
269
270 pte_t *pte;
271 struct mm_struct *mm;
272
273 /* Allow mapping of user addresses (within the thread)
274 * for DMA if necessary.
275 */
276 if (addr < TASK_SIZE)
277 mm = current->mm;
278 else
279 mm = &init_mm;
280
281 pa = 0;
282 if (get_pteptr(mm, addr, &pte))
283 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
284
285 return pa;
286}
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 2562f8f9be0e..000000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SUSPEND_H
2#define __ASM_SUSPEND_H
3
4/* Somewhen... Maybe :-) */
5
6#endif /* __ASM_SUSPEND_H */
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1f60e27523d9..3e9100dcc12d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -68,8 +68,6 @@ void *module_alloc(unsigned long size)
68void module_free(struct module *mod, void *module_region) 68void module_free(struct module *mod, void *module_region)
69{ 69{
70 vfree(module_region); 70 vfree(module_region);
71 /* FIXME: If module_region == mod->init_region, trim exception
72 table entries. */
73} 71}
74 72
75int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, 73int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c
index 6b287f2e8e84..4fa0e3648d8e 100644
--- a/arch/mn10300/kernel/module.c
+++ b/arch/mn10300/kernel/module.c
@@ -48,8 +48,6 @@ void *module_alloc(unsigned long size)
48void module_free(struct module *mod, void *module_region) 48void module_free(struct module *mod, void *module_region)
49{ 49{
50 vfree(module_region); 50 vfree(module_region);
51 /* FIXME: If module_region == mod->init_region, trim exception
52 * table entries. */
53} 51}
54 52
55/* 53/*
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index ecd1c5024447..ef5caf2e6ed0 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -267,8 +267,6 @@ void module_free(struct module *mod, void *module_region)
267 mod->arch.section = NULL; 267 mod->arch.section = NULL;
268 268
269 vfree(module_region); 269 vfree(module_region);
270 /* FIXME: If module_region == mod->init_region, trim exception
271 table entries. */
272} 270}
273 271
274/* Additional bytes needed in front of individual sections */ 272/* Additional bytes needed in front of individual sections */
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index a218da6bec7c..fb8412057450 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -28,6 +28,10 @@
28#define MPC52xx_PSC_MAXNUM 6 28#define MPC52xx_PSC_MAXNUM 6
29 29
30/* Programmable Serial Controller (PSC) status register bits */ 30/* Programmable Serial Controller (PSC) status register bits */
31#define MPC52xx_PSC_SR_UNEX_RX 0x0001
32#define MPC52xx_PSC_SR_DATA_VAL 0x0002
33#define MPC52xx_PSC_SR_DATA_OVR 0x0004
34#define MPC52xx_PSC_SR_CMDSEND 0x0008
31#define MPC52xx_PSC_SR_CDE 0x0080 35#define MPC52xx_PSC_SR_CDE 0x0080
32#define MPC52xx_PSC_SR_RXRDY 0x0100 36#define MPC52xx_PSC_SR_RXRDY 0x0100
33#define MPC52xx_PSC_SR_RXFULL 0x0200 37#define MPC52xx_PSC_SR_RXFULL 0x0200
@@ -61,6 +65,12 @@
61#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001 65#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
62 66
63/* PSC interrupt status/mask bits */ 67/* PSC interrupt status/mask bits */
68#define MPC52xx_PSC_IMR_UNEX_RX_SLOT 0x0001
69#define MPC52xx_PSC_IMR_DATA_VALID 0x0002
70#define MPC52xx_PSC_IMR_DATA_OVR 0x0004
71#define MPC52xx_PSC_IMR_CMD_SEND 0x0008
72#define MPC52xx_PSC_IMR_ERROR 0x0040
73#define MPC52xx_PSC_IMR_DEOF 0x0080
64#define MPC52xx_PSC_IMR_TXRDY 0x0100 74#define MPC52xx_PSC_IMR_TXRDY 0x0100
65#define MPC52xx_PSC_IMR_RXRDY 0x0200 75#define MPC52xx_PSC_IMR_RXRDY 0x0200
66#define MPC52xx_PSC_IMR_DB 0x0400 76#define MPC52xx_PSC_IMR_DB 0x0400
@@ -117,6 +127,7 @@
117#define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24) 127#define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24)
118#define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24) 128#define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24)
119#define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24) 129#define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24)
130#define MPC52xx_PSC_SICR_AWR (1 << 30)
120#define MPC52xx_PSC_SICR_GENCLK (1 << 23) 131#define MPC52xx_PSC_SICR_GENCLK (1 << 23)
121#define MPC52xx_PSC_SICR_I2S (1 << 22) 132#define MPC52xx_PSC_SICR_I2S (1 << 22)
122#define MPC52xx_PSC_SICR_CLKPOL (1 << 21) 133#define MPC52xx_PSC_SICR_CLKPOL (1 << 21)
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 43e7e3a7f130..477c663e0140 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -43,8 +43,6 @@ void *module_alloc(unsigned long size)
43void module_free(struct module *mod, void *module_region) 43void module_free(struct module *mod, void *module_region)
44{ 44{
45 vfree(module_region); 45 vfree(module_region);
46 /* FIXME: If module_region == mod->init_region, trim exception
47 table entries. */
48} 46}
49 47
50static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, 48static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index b3f7d1216bae..b72e7a19d054 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -294,12 +294,12 @@ static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
294} 294}
295 295
296static int power7_generic_events[] = { 296static int power7_generic_events[] = {
297 [PERF_COUNT_CPU_CYCLES] = 0x1e, 297 [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
298 [PERF_COUNT_INSTRUCTIONS] = 2, 298 [PERF_COUNT_HW_INSTRUCTIONS] = 2,
299 [PERF_COUNT_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */ 299 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/
300 [PERF_COUNT_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ 300 [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
301 [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ 301 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
302 [PERF_COUNT_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ 302 [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
303}; 303};
304 304
305#define C(x) PERF_COUNT_HW_CACHE_##x 305#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index 1f34580e67a7..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifndef __ASM_S390_SUSPEND_H
2#define __ASM_S390_SUSPEND_H
3
4#endif
5
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index eed4a00cb676..ab2e3ed28abc 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -56,8 +56,6 @@ void *module_alloc(unsigned long size)
56void module_free(struct module *mod, void *module_region) 56void module_free(struct module *mod, void *module_region)
57{ 57{
58 vfree(module_region); 58 vfree(module_region);
59 /* FIXME: If module_region == mod->init_region, trim exception
60 table entries. */
61} 59}
62 60
63static void 61static void
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index c19b0f7d2cc1..c2efdcde266f 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -46,8 +46,6 @@ void *module_alloc(unsigned long size)
46void module_free(struct module *mod, void *module_region) 46void module_free(struct module *mod, void *module_region)
47{ 47{
48 vfree(module_region); 48 vfree(module_region);
49 /* FIXME: If module_region == mod->init_region, trim exception
50 table entries. */
51} 49}
52 50
53/* We don't need anything special. */ 51/* We don't need anything special. */
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 47d5619d43fa..8303ac481034 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -17,6 +17,9 @@
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19 19
20#define ARCH_HAS_SORT_EXTABLE
21#define ARCH_HAS_SEARCH_EXTABLE
22
20/* Sparc is not segmented, however we need to be able to fool access_ok() 23/* Sparc is not segmented, however we need to be able to fool access_ok()
21 * when doing system calls from kernel mode legitimately. 24 * when doing system calls from kernel mode legitimately.
22 * 25 *
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 90273765e81f..0ee642f63234 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -75,8 +75,6 @@ void *module_alloc(unsigned long size)
75void module_free(struct module *mod, void *module_region) 75void module_free(struct module *mod, void *module_region)
76{ 76{
77 vfree(module_region); 77 vfree(module_region);
78 /* FIXME: If module_region == mod->init_region, trim exception
79 table entries. */
80} 78}
81 79
82/* Make generic code ignore STT_REGISTER dummy undefined symbols. */ 80/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index 16cc28935e39..a61c349448e1 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -28,6 +28,10 @@ search_extable(const struct exception_table_entry *start,
28 * word 3: last insn address + 4 bytes 28 * word 3: last insn address + 4 bytes
29 * word 4: fixup code address 29 * word 4: fixup code address
30 * 30 *
31 * Deleted entries are encoded as:
32 * word 1: unused
33 * word 2: -1
34 *
31 * See asm/uaccess.h for more details. 35 * See asm/uaccess.h for more details.
32 */ 36 */
33 37
@@ -39,6 +43,10 @@ search_extable(const struct exception_table_entry *start,
39 continue; 43 continue;
40 } 44 }
41 45
46 /* A deleted entry; see trim_init_extable */
47 if (walk->fixup == -1)
48 continue;
49
42 if (walk->insn == value) 50 if (walk->insn == value)
43 return walk; 51 return walk;
44 } 52 }
@@ -57,6 +65,27 @@ search_extable(const struct exception_table_entry *start,
57 return NULL; 65 return NULL;
58} 66}
59 67
68#ifdef CONFIG_MODULES
69/* We could memmove them around; easier to mark the trimmed ones. */
70void trim_init_extable(struct module *m)
71{
72 unsigned int i;
73 bool range;
74
75 for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
76 range = m->extable[i].fixup == 0;
77
78 if (within_module_init(m->extable[i].insn, m)) {
79 m->extable[i].fixup = -1;
80 if (range)
81 m->extable[i+1].fixup = -1;
82 }
83 if (range)
84 i++;
85 }
86}
87#endif /* CONFIG_MODULES */
88
60/* Special extable search, which handles ranges. Returns fixup */ 89/* Special extable search, which handles ranges. Returns fixup */
61unsigned long search_extables_range(unsigned long addr, unsigned long *g2) 90unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
62{ 91{
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 58da2480a7f4..9ce3f165111a 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -53,16 +53,21 @@ extern unsigned long end_iomem;
53#else 53#else
54# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) 54# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
55#endif 55#endif
56#define MODULES_VADDR VMALLOC_START
57#define MODULES_END VMALLOC_END
58#define MODULES_LEN (MODULES_VADDR - MODULES_END)
56 59
57#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) 60#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
58#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 61#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
59#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 62#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
60 63#define __PAGE_KERNEL_EXEC \
64 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
61#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) 65#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
62#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) 66#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
63#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) 67#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
64#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) 68#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
65#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) 69#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
70#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
66 71
67/* 72/*
68 * The i386 can't do page protection for execute, and considers that the same 73 * The i386 can't do page protection for execute, and considers that the same
diff --git a/arch/um/include/asm/suspend.h b/arch/um/include/asm/suspend.h
deleted file mode 100644
index f4e8e007f468..000000000000
--- a/arch/um/include/asm/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __UM_SUSPEND_H
2#define __UM_SUSPEND_H
3
4#endif
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 598b5c1903af..1b549bca4645 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -8,7 +8,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
8 8
9subarch-obj-y = lib/semaphore_32.o lib/string_32.o 9subarch-obj-y = lib/semaphore_32.o lib/string_32.o
10subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o 10subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
11subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o 11subarch-obj-$(CONFIG_MODULES) += kernel/module.o
12 12
13USER_OBJS := bugs.o ptrace_user.o fault.o 13USER_OBJS := bugs.o ptrace_user.o fault.o
14 14
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index c8b4cce9cfe1..2201e9c20e4a 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -8,10 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
8 setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \ 8 setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
9 sysrq.o ksyms.o tls.o 9 sysrq.o ksyms.o tls.o
10 10
11obj-$(CONFIG_MODULES) += um_module.o
12
13subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o 11subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
14subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o 12subarch-obj-$(CONFIG_MODULES) += kernel/module.o
15 13
16ldt-y = ../sys-i386/ldt.o 14ldt-y = ../sys-i386/ldt.o
17 15
diff --git a/arch/um/sys-x86_64/um_module.c b/arch/um/sys-x86_64/um_module.c
deleted file mode 100644
index 3dead392a415..000000000000
--- a/arch/um/sys-x86_64/um_module.c
+++ /dev/null
@@ -1,21 +0,0 @@
1#include <linux/vmalloc.h>
2#include <linux/moduleloader.h>
3
4/* Copied from i386 arch/i386/kernel/module.c */
5void *module_alloc(unsigned long size)
6{
7 if (size == 0)
8 return NULL;
9 return vmalloc_exec(size);
10}
11
12/* Free memory returned from module_alloc */
13void module_free(struct module *mod, void *module_region)
14{
15 vfree(module_region);
16 /*
17 * FIXME: If module_region == mod->init_region, trim exception
18 * table entries.
19 */
20}
21
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 1caf57628b9c..313389cd50d2 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -17,8 +17,13 @@
17/* Pages for switcher itself, then two pages per cpu */ 17/* Pages for switcher itself, then two pages per cpu */
18#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) 18#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
19 19
20/* We map at -4M for ease of mapping into the guest (one PTE page). */ 20/* We map at -4M (-2M when PAE is activated) for ease of mapping
21 * into the guest (one PTE page). */
22#ifdef CONFIG_X86_PAE
23#define SWITCHER_ADDR 0xFFE00000
24#else
21#define SWITCHER_ADDR 0xFFC00000 25#define SWITCHER_ADDR 0xFFC00000
26#endif
22 27
23/* Found in switcher.S */ 28/* Found in switcher.S */
24extern unsigned long default_idt_entries[]; 29extern unsigned long default_idt_entries[];
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index faae1996487b..d31c4a684078 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -12,11 +12,13 @@
12#define LHCALL_TS 8 12#define LHCALL_TS 8
13#define LHCALL_SET_CLOCKEVENT 9 13#define LHCALL_SET_CLOCKEVENT 9
14#define LHCALL_HALT 10 14#define LHCALL_HALT 10
15#define LHCALL_SET_PMD 13
15#define LHCALL_SET_PTE 14 16#define LHCALL_SET_PTE 14
16#define LHCALL_SET_PMD 15 17#define LHCALL_SET_PGD 15
17#define LHCALL_LOAD_TLS 16 18#define LHCALL_LOAD_TLS 16
18#define LHCALL_NOTIFY 17 19#define LHCALL_NOTIFY 17
19#define LHCALL_LOAD_GDT_ENTRY 18 20#define LHCALL_LOAD_GDT_ENTRY 18
21#define LHCALL_SEND_INTERRUPTS 19
20 22
21#define LGUEST_TRAP_ENTRY 0x1F 23#define LGUEST_TRAP_ENTRY 0x1F
22 24
@@ -32,10 +34,10 @@
32 * operations? There are two ways: the direct way is to make a "hypercall", 34 * operations? There are two ways: the direct way is to make a "hypercall",
33 * to make requests of the Host Itself. 35 * to make requests of the Host Itself.
34 * 36 *
35 * We use the KVM hypercall mechanism. Eighteen hypercalls are 37 * We use the KVM hypercall mechanism. Seventeen hypercalls are
36 * available: the hypercall number is put in the %eax register, and the 38 * available: the hypercall number is put in the %eax register, and the
37 * arguments (when required) are placed in %ebx, %ecx and %edx. If a return 39 * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
38 * value makes sense, it's returned in %eax. 40 * If a return value makes sense, it's returned in %eax.
39 * 41 *
40 * Grossly invalid calls result in Sudden Death at the hands of the vengeful 42 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
41 * Host, rather than returning failure. This reflects Winston Churchill's 43 * Host, rather than returning failure. This reflects Winston Churchill's
@@ -47,8 +49,9 @@
47 49
48#define LHCALL_RING_SIZE 64 50#define LHCALL_RING_SIZE 64
49struct hcall_args { 51struct hcall_args {
50 /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ 52 /* These map directly onto eax, ebx, ecx, edx and esi
51 unsigned long arg0, arg1, arg2, arg3; 53 * in struct lguest_regs */
54 unsigned long arg0, arg1, arg2, arg3, arg4;
52}; 55};
53 56
54#endif /* !__ASSEMBLY__ */ 57#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index 2733fad45f98..5e67c1532314 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -46,6 +46,10 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
46# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 46# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
47#endif 47#endif
48 48
49#define MODULES_VADDR VMALLOC_START
50#define MODULES_END VMALLOC_END
51#define MODULES_LEN (MODULES_VADDR - MODULES_END)
52
49#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) 53#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
50 54
51#endif /* _ASM_X86_PGTABLE_32_DEFS_H */ 55#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index a5ecc9c33e92..7f3eba08e7de 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -172,6 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
172 flush_tlb_all(); 172 flush_tlb_all();
173} 173}
174 174
175extern void zap_low_mappings(void); 175extern void zap_low_mappings(bool early);
176 176
177#endif /* _ASM_X86_TLBFLUSH_H */ 177#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4f78bd682125..f3477bb84566 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -73,7 +73,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
73obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 73obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
74obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 74obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
75obj-$(CONFIG_KPROBES) += kprobes.o 75obj-$(CONFIG_KPROBES) += kprobes.o
76obj-$(CONFIG_MODULES) += module_$(BITS).o 76obj-$(CONFIG_MODULES) += module.o
77obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o 77obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
78obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o 78obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
79obj-$(CONFIG_KGDB) += kgdb.o 79obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 7c243a2c5115..ca93638ba430 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -104,7 +104,7 @@ int acpi_save_state_mem(void)
104 initial_gs = per_cpu_offset(smp_processor_id()); 104 initial_gs = per_cpu_offset(smp_processor_id());
105#endif 105#endif
106 initial_code = (unsigned long)wakeup_long64; 106 initial_code = (unsigned long)wakeup_long64;
107 saved_magic = 0x123456789abcdef0; 107 saved_magic = 0x123456789abcdef0L;
108#endif /* CONFIG_64BIT */ 108#endif /* CONFIG_64BIT */
109 109
110 return 0; 110 return 0;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 49e0939bac42..79302e9a33a4 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1233,9 +1233,9 @@ static int suspend(int vetoable)
1233 int err; 1233 int err;
1234 struct apm_user *as; 1234 struct apm_user *as;
1235 1235
1236 device_suspend(PMSG_SUSPEND); 1236 dpm_suspend_start(PMSG_SUSPEND);
1237 1237
1238 device_power_down(PMSG_SUSPEND); 1238 dpm_suspend_noirq(PMSG_SUSPEND);
1239 1239
1240 local_irq_disable(); 1240 local_irq_disable();
1241 sysdev_suspend(PMSG_SUSPEND); 1241 sysdev_suspend(PMSG_SUSPEND);
@@ -1259,9 +1259,9 @@ static int suspend(int vetoable)
1259 sysdev_resume(); 1259 sysdev_resume();
1260 local_irq_enable(); 1260 local_irq_enable();
1261 1261
1262 device_power_up(PMSG_RESUME); 1262 dpm_resume_noirq(PMSG_RESUME);
1263 1263
1264 device_resume(PMSG_RESUME); 1264 dpm_resume_end(PMSG_RESUME);
1265 queue_event(APM_NORMAL_RESUME, NULL); 1265 queue_event(APM_NORMAL_RESUME, NULL);
1266 spin_lock(&user_list_lock); 1266 spin_lock(&user_list_lock);
1267 for (as = user_list; as != NULL; as = as->next) { 1267 for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1277,7 @@ static void standby(void)
1277{ 1277{
1278 int err; 1278 int err;
1279 1279
1280 device_power_down(PMSG_SUSPEND); 1280 dpm_suspend_noirq(PMSG_SUSPEND);
1281 1281
1282 local_irq_disable(); 1282 local_irq_disable();
1283 sysdev_suspend(PMSG_SUSPEND); 1283 sysdev_suspend(PMSG_SUSPEND);
@@ -1291,7 +1291,7 @@ static void standby(void)
1291 sysdev_resume(); 1291 sysdev_resume();
1292 local_irq_enable(); 1292 local_irq_enable();
1293 1293
1294 device_power_up(PMSG_RESUME); 1294 dpm_resume_noirq(PMSG_RESUME);
1295} 1295}
1296 1296
1297static apm_event_t get_event(void) 1297static apm_event_t get_event(void)
@@ -1376,7 +1376,7 @@ static void check_events(void)
1376 ignore_bounce = 1; 1376 ignore_bounce = 1;
1377 if ((event != APM_NORMAL_RESUME) 1377 if ((event != APM_NORMAL_RESUME)
1378 || (ignore_normal_resume == 0)) { 1378 || (ignore_normal_resume == 0)) {
1379 device_resume(PMSG_RESUME); 1379 dpm_resume_end(PMSG_RESUME);
1380 queue_event(event, NULL); 1380 queue_event(event, NULL);
1381 } 1381 }
1382 ignore_normal_resume = 0; 1382 ignore_normal_resume = 0;
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 1a830cbd7015..dfdbf6403895 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -126,6 +126,7 @@ void foo(void)
126#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) 126#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
127 BLANK(); 127 BLANK();
128 OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); 128 OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
129 OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
129 OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); 130 OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
130 131
131 BLANK(); 132 BLANK();
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 895c82e78455..275bc142cd5d 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -968,6 +968,13 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
968 if (!x86_pmu.num_counters_fixed) 968 if (!x86_pmu.num_counters_fixed)
969 return -1; 969 return -1;
970 970
971 /*
972 * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
973 */
974 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
975 boot_cpu_data.x86_model == 28)
976 return -1;
977
971 event = hwc->config & ARCH_PERFMON_EVENT_MASK; 978 event = hwc->config & ARCH_PERFMON_EVENT_MASK;
972 979
973 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 980 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module.c
index c23880b90b5c..89f386f044e4 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module.c
@@ -1,6 +1,5 @@
1/* Kernel module help for x86-64 1/* Kernel module help for x86.
2 Copyright (C) 2001 Rusty Russell. 2 Copyright (C) 2001 Rusty Russell.
3 Copyright (C) 2002,2003 Andi Kleen, SuSE Labs.
4 3
5 This program is free software; you can redistribute it and/or modify 4 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by 5 it under the terms of the GNU General Public License as published by
@@ -22,23 +21,18 @@
22#include <linux/fs.h> 21#include <linux/fs.h>
23#include <linux/string.h> 22#include <linux/string.h>
24#include <linux/kernel.h> 23#include <linux/kernel.h>
25#include <linux/mm.h>
26#include <linux/slab.h>
27#include <linux/bug.h> 24#include <linux/bug.h>
25#include <linux/mm.h>
28 26
29#include <asm/system.h> 27#include <asm/system.h>
30#include <asm/page.h> 28#include <asm/page.h>
31#include <asm/pgtable.h> 29#include <asm/pgtable.h>
32 30
31#if 0
32#define DEBUGP printk
33#else
33#define DEBUGP(fmt...) 34#define DEBUGP(fmt...)
34 35#endif
35#ifndef CONFIG_UML
36void module_free(struct module *mod, void *module_region)
37{
38 vfree(module_region);
39 /* FIXME: If module_region == mod->init_region, trim exception
40 table entries. */
41}
42 36
43void *module_alloc(unsigned long size) 37void *module_alloc(unsigned long size)
44{ 38{
@@ -54,9 +48,15 @@ void *module_alloc(unsigned long size)
54 if (!area) 48 if (!area)
55 return NULL; 49 return NULL;
56 50
57 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); 51 return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
52 PAGE_KERNEL_EXEC);
53}
54
55/* Free memory returned from module_alloc */
56void module_free(struct module *mod, void *module_region)
57{
58 vfree(module_region);
58} 59}
59#endif
60 60
61/* We don't need anything special. */ 61/* We don't need anything special. */
62int module_frob_arch_sections(Elf_Ehdr *hdr, 62int module_frob_arch_sections(Elf_Ehdr *hdr,
@@ -67,6 +67,58 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
67 return 0; 67 return 0;
68} 68}
69 69
70#ifdef CONFIG_X86_32
71int apply_relocate(Elf32_Shdr *sechdrs,
72 const char *strtab,
73 unsigned int symindex,
74 unsigned int relsec,
75 struct module *me)
76{
77 unsigned int i;
78 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
79 Elf32_Sym *sym;
80 uint32_t *location;
81
82 DEBUGP("Applying relocate section %u to %u\n", relsec,
83 sechdrs[relsec].sh_info);
84 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
85 /* This is where to make the change */
86 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
87 + rel[i].r_offset;
88 /* This is the symbol it is referring to. Note that all
89 undefined symbols have been resolved. */
90 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
91 + ELF32_R_SYM(rel[i].r_info);
92
93 switch (ELF32_R_TYPE(rel[i].r_info)) {
94 case R_386_32:
95 /* We add the value into the location given */
96 *location += sym->st_value;
97 break;
98 case R_386_PC32:
 99 /* Add the value, subtract its position */
100 *location += sym->st_value - (uint32_t)location;
101 break;
102 default:
103 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
104 me->name, ELF32_R_TYPE(rel[i].r_info));
105 return -ENOEXEC;
106 }
107 }
108 return 0;
109}
110
111int apply_relocate_add(Elf32_Shdr *sechdrs,
112 const char *strtab,
113 unsigned int symindex,
114 unsigned int relsec,
115 struct module *me)
116{
117 printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
118 me->name);
119 return -ENOEXEC;
120}
121#else /*X86_64*/
70int apply_relocate_add(Elf64_Shdr *sechdrs, 122int apply_relocate_add(Elf64_Shdr *sechdrs,
71 const char *strtab, 123 const char *strtab,
72 unsigned int symindex, 124 unsigned int symindex,
@@ -147,6 +199,8 @@ int apply_relocate(Elf_Shdr *sechdrs,
147 return -ENOSYS; 199 return -ENOSYS;
148} 200}
149 201
202#endif
203
150int module_finalize(const Elf_Ehdr *hdr, 204int module_finalize(const Elf_Ehdr *hdr,
151 const Elf_Shdr *sechdrs, 205 const Elf_Shdr *sechdrs,
152 struct module *me) 206 struct module *me)
diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c
deleted file mode 100644
index 0edd819050e7..000000000000
--- a/arch/x86/kernel/module_32.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/* Kernel module help for i386.
2 Copyright (C) 2001 Rusty Russell.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/
18#include <linux/moduleloader.h>
19#include <linux/elf.h>
20#include <linux/vmalloc.h>
21#include <linux/fs.h>
22#include <linux/string.h>
23#include <linux/kernel.h>
24#include <linux/bug.h>
25
26#if 0
27#define DEBUGP printk
28#else
29#define DEBUGP(fmt...)
30#endif
31
32void *module_alloc(unsigned long size)
33{
34 if (size == 0)
35 return NULL;
36 return vmalloc_exec(size);
37}
38
39
40/* Free memory returned from module_alloc */
41void module_free(struct module *mod, void *module_region)
42{
43 vfree(module_region);
44 /* FIXME: If module_region == mod->init_region, trim exception
45 table entries. */
46}
47
48/* We don't need anything special. */
49int module_frob_arch_sections(Elf_Ehdr *hdr,
50 Elf_Shdr *sechdrs,
51 char *secstrings,
52 struct module *mod)
53{
54 return 0;
55}
56
57int apply_relocate(Elf32_Shdr *sechdrs,
58 const char *strtab,
59 unsigned int symindex,
60 unsigned int relsec,
61 struct module *me)
62{
63 unsigned int i;
64 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
65 Elf32_Sym *sym;
66 uint32_t *location;
67
68 DEBUGP("Applying relocate section %u to %u\n", relsec,
69 sechdrs[relsec].sh_info);
70 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
71 /* This is where to make the change */
72 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
73 + rel[i].r_offset;
74 /* This is the symbol it is referring to. Note that all
75 undefined symbols have been resolved. */
76 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
77 + ELF32_R_SYM(rel[i].r_info);
78
79 switch (ELF32_R_TYPE(rel[i].r_info)) {
80 case R_386_32:
81 /* We add the value into the location given */
82 *location += sym->st_value;
83 break;
84 case R_386_PC32:
85 /* Add the value, subtract its postition */
86 *location += sym->st_value - (uint32_t)location;
87 break;
88 default:
89 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
90 me->name, ELF32_R_TYPE(rel[i].r_info));
91 return -ENOEXEC;
92 }
93 }
94 return 0;
95}
96
97int apply_relocate_add(Elf32_Shdr *sechdrs,
98 const char *strtab,
99 unsigned int symindex,
100 unsigned int relsec,
101 struct module *me)
102{
103 printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
104 me->name);
105 return -ENOEXEC;
106}
107
108int module_finalize(const Elf_Ehdr *hdr,
109 const Elf_Shdr *sechdrs,
110 struct module *me)
111{
112 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
113 *para = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115
116 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
117 if (!strcmp(".text", secstrings + s->sh_name))
118 text = s;
119 if (!strcmp(".altinstructions", secstrings + s->sh_name))
120 alt = s;
121 if (!strcmp(".smp_locks", secstrings + s->sh_name))
122 locks = s;
123 if (!strcmp(".parainstructions", secstrings + s->sh_name))
124 para = s;
125 }
126
127 if (alt) {
128 /* patch .altinstructions */
129 void *aseg = (void *)alt->sh_addr;
130 apply_alternatives(aseg, aseg + alt->sh_size);
131 }
132 if (locks && text) {
133 void *lseg = (void *)locks->sh_addr;
134 void *tseg = (void *)text->sh_addr;
135 alternatives_smp_module_add(me, me->name,
136 lseg, lseg + locks->sh_size,
137 tseg, tseg + text->sh_size);
138 }
139
140 if (para) {
141 void *pseg = (void *)para->sh_addr;
142 apply_paravirt(pseg, pseg + para->sh_size);
143 }
144
145 return module_bug_finalize(hdr, sechdrs, me);
146}
147
148void module_arch_cleanup(struct module *mod)
149{
150 alternatives_smp_module_del(mod);
151 module_bug_cleanup(mod);
152}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d1c636bf31a7..be5ae80f897f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -301,15 +301,13 @@ static void __init reserve_brk(void)
301 301
302#ifdef CONFIG_BLK_DEV_INITRD 302#ifdef CONFIG_BLK_DEV_INITRD
303 303
304#ifdef CONFIG_X86_32
305
306#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) 304#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
307static void __init relocate_initrd(void) 305static void __init relocate_initrd(void)
308{ 306{
309 307
310 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 308 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
311 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 309 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
312 u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT; 310 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
313 u64 ramdisk_here; 311 u64 ramdisk_here;
314 unsigned long slop, clen, mapaddr; 312 unsigned long slop, clen, mapaddr;
315 char *p, *q; 313 char *p, *q;
@@ -365,14 +363,13 @@ static void __init relocate_initrd(void)
365 ramdisk_image, ramdisk_image + ramdisk_size - 1, 363 ramdisk_image, ramdisk_image + ramdisk_size - 1,
366 ramdisk_here, ramdisk_here + ramdisk_size - 1); 364 ramdisk_here, ramdisk_here + ramdisk_size - 1);
367} 365}
368#endif
369 366
370static void __init reserve_initrd(void) 367static void __init reserve_initrd(void)
371{ 368{
372 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 369 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
373 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 370 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
374 u64 ramdisk_end = ramdisk_image + ramdisk_size; 371 u64 ramdisk_end = ramdisk_image + ramdisk_size;
375 u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT; 372 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
376 373
377 if (!boot_params.hdr.type_of_loader || 374 if (!boot_params.hdr.type_of_loader ||
378 !ramdisk_image || !ramdisk_size) 375 !ramdisk_image || !ramdisk_size)
@@ -402,14 +399,8 @@ static void __init reserve_initrd(void)
402 return; 399 return;
403 } 400 }
404 401
405#ifdef CONFIG_X86_32
406 relocate_initrd(); 402 relocate_initrd();
407#else 403
408 printk(KERN_ERR "initrd extends beyond end of memory "
409 "(0x%08llx > 0x%08llx)\ndisabling initrd\n",
410 ramdisk_end, end_of_lowmem);
411 initrd_start = 0;
412#endif
413 free_early(ramdisk_image, ramdisk_end); 404 free_early(ramdisk_image, ramdisk_end);
414} 405}
415#else 406#else
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7c80007ea5f7..2fecda69ee64 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -873,7 +873,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
873 873
874 err = do_boot_cpu(apicid, cpu); 874 err = do_boot_cpu(apicid, cpu);
875 875
876 zap_low_mappings(); 876 zap_low_mappings(false);
877 low_mappings = 0; 877 low_mappings = 0;
878#else 878#else
879 err = do_boot_cpu(apicid, cpu); 879 err = do_boot_cpu(apicid, cpu);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4c85b2e2bb65..367e87882041 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -108,6 +108,8 @@ SECTIONS
108 /* Data */ 108 /* Data */
109 . = ALIGN(PAGE_SIZE); 109 . = ALIGN(PAGE_SIZE);
110 .data : AT(ADDR(.data) - LOAD_OFFSET) { 110 .data : AT(ADDR(.data) - LOAD_OFFSET) {
111 /* Start of data section */
112 _sdata = .;
111 DATA_DATA 113 DATA_DATA
112 CONSTRUCTORS 114 CONSTRUCTORS
113 115
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 8dab8f7844d3..38718041efc3 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,7 +2,6 @@ config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 select PARAVIRT
4 depends on X86_32 4 depends on X86_32
5 depends on !X86_PAE
6 select VIRTIO 5 select VIRTIO
7 select VIRTIO_RING 6 select VIRTIO_RING
8 select VIRTIO_CONSOLE 7 select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4e0c26559395..7bc65f0f62c4 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -87,7 +87,7 @@ struct lguest_data lguest_data = {
87 87
88/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a 88/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
 89 * ring buffer of stored hypercalls which the Host will run through next time we 89 * ring buffer of stored hypercalls which the Host will run through next time we
90 * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall 90 * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall
91 * arguments, and a "hcall_status" word which is 0 if the call is ready to go, 91 * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
92 * and 255 once the Host has finished with it. 92 * and 255 once the Host has finished with it.
93 * 93 *
@@ -96,7 +96,8 @@ struct lguest_data lguest_data = {
96 * effect of causing the Host to run all the stored calls in the ring buffer 96 * effect of causing the Host to run all the stored calls in the ring buffer
97 * which empties it for next time! */ 97 * which empties it for next time! */
98static void async_hcall(unsigned long call, unsigned long arg1, 98static void async_hcall(unsigned long call, unsigned long arg1,
99 unsigned long arg2, unsigned long arg3) 99 unsigned long arg2, unsigned long arg3,
100 unsigned long arg4)
100{ 101{
101 /* Note: This code assumes we're uniprocessor. */ 102 /* Note: This code assumes we're uniprocessor. */
102 static unsigned int next_call; 103 static unsigned int next_call;
@@ -108,12 +109,13 @@ static void async_hcall(unsigned long call, unsigned long arg1,
108 local_irq_save(flags); 109 local_irq_save(flags);
109 if (lguest_data.hcall_status[next_call] != 0xFF) { 110 if (lguest_data.hcall_status[next_call] != 0xFF) {
110 /* Table full, so do normal hcall which will flush table. */ 111 /* Table full, so do normal hcall which will flush table. */
111 kvm_hypercall3(call, arg1, arg2, arg3); 112 kvm_hypercall4(call, arg1, arg2, arg3, arg4);
112 } else { 113 } else {
113 lguest_data.hcalls[next_call].arg0 = call; 114 lguest_data.hcalls[next_call].arg0 = call;
114 lguest_data.hcalls[next_call].arg1 = arg1; 115 lguest_data.hcalls[next_call].arg1 = arg1;
115 lguest_data.hcalls[next_call].arg2 = arg2; 116 lguest_data.hcalls[next_call].arg2 = arg2;
116 lguest_data.hcalls[next_call].arg3 = arg3; 117 lguest_data.hcalls[next_call].arg3 = arg3;
118 lguest_data.hcalls[next_call].arg4 = arg4;
117 /* Arguments must all be written before we mark it to go */ 119 /* Arguments must all be written before we mark it to go */
118 wmb(); 120 wmb();
119 lguest_data.hcall_status[next_call] = 0; 121 lguest_data.hcall_status[next_call] = 0;
@@ -141,7 +143,7 @@ static void lazy_hcall1(unsigned long call,
141 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 143 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
142 kvm_hypercall1(call, arg1); 144 kvm_hypercall1(call, arg1);
143 else 145 else
144 async_hcall(call, arg1, 0, 0); 146 async_hcall(call, arg1, 0, 0, 0);
145} 147}
146 148
147static void lazy_hcall2(unsigned long call, 149static void lazy_hcall2(unsigned long call,
@@ -151,7 +153,7 @@ static void lazy_hcall2(unsigned long call,
151 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 153 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
152 kvm_hypercall2(call, arg1, arg2); 154 kvm_hypercall2(call, arg1, arg2);
153 else 155 else
154 async_hcall(call, arg1, arg2, 0); 156 async_hcall(call, arg1, arg2, 0, 0);
155} 157}
156 158
157static void lazy_hcall3(unsigned long call, 159static void lazy_hcall3(unsigned long call,
@@ -162,9 +164,23 @@ static void lazy_hcall3(unsigned long call,
162 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 164 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
163 kvm_hypercall3(call, arg1, arg2, arg3); 165 kvm_hypercall3(call, arg1, arg2, arg3);
164 else 166 else
165 async_hcall(call, arg1, arg2, arg3); 167 async_hcall(call, arg1, arg2, arg3, 0);
166} 168}
167 169
170#ifdef CONFIG_X86_PAE
171static void lazy_hcall4(unsigned long call,
172 unsigned long arg1,
173 unsigned long arg2,
174 unsigned long arg3,
175 unsigned long arg4)
176{
177 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
178 kvm_hypercall4(call, arg1, arg2, arg3, arg4);
179 else
180 async_hcall(call, arg1, arg2, arg3, arg4);
181}
182#endif
183
168/* When lazy mode is turned off reset the per-cpu lazy mode variable and then 184/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
169 * issue the do-nothing hypercall to flush any stored calls. */ 185 * issue the do-nothing hypercall to flush any stored calls. */
170static void lguest_leave_lazy_mmu_mode(void) 186static void lguest_leave_lazy_mmu_mode(void)
@@ -179,7 +195,7 @@ static void lguest_end_context_switch(struct task_struct *next)
179 paravirt_end_context_switch(next); 195 paravirt_end_context_switch(next);
180} 196}
181 197
182/*G:033 198/*G:032
183 * After that diversion we return to our first native-instruction 199 * After that diversion we return to our first native-instruction
184 * replacements: four functions for interrupt control. 200 * replacements: four functions for interrupt control.
185 * 201 *
@@ -199,30 +215,28 @@ static unsigned long save_fl(void)
199{ 215{
200 return lguest_data.irq_enabled; 216 return lguest_data.irq_enabled;
201} 217}
202PV_CALLEE_SAVE_REGS_THUNK(save_fl);
203
204/* restore_flags() just sets the flags back to the value given. */
205static void restore_fl(unsigned long flags)
206{
207 lguest_data.irq_enabled = flags;
208}
209PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
210 218
211/* Interrupts go off... */ 219/* Interrupts go off... */
212static void irq_disable(void) 220static void irq_disable(void)
213{ 221{
214 lguest_data.irq_enabled = 0; 222 lguest_data.irq_enabled = 0;
215} 223}
224
225/* Let's pause a moment. Remember how I said these are called so often?
226 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
227 * break some rules. In particular, these functions are assumed to save their
228 * own registers if they need to: normal C functions assume they can trash the
229 * eax register. To use normal C functions, we use
230 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
231 * C function, then restores it. */
232PV_CALLEE_SAVE_REGS_THUNK(save_fl);
216PV_CALLEE_SAVE_REGS_THUNK(irq_disable); 233PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
234/*:*/
217 235
218/* Interrupts go on... */ 236/* These are in i386_head.S */
219static void irq_enable(void) 237extern void lg_irq_enable(void);
220{ 238extern void lg_restore_fl(unsigned long flags);
221 lguest_data.irq_enabled = X86_EFLAGS_IF;
222}
223PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
224 239
225/*:*/
226/*M:003 Note that we don't check for outstanding interrupts when we re-enable 240/*M:003 Note that we don't check for outstanding interrupts when we re-enable
227 * them (or when we unmask an interrupt). This seems to work for the moment, 241 * them (or when we unmask an interrupt). This seems to work for the moment,
228 * since interrupts are rare and we'll just get the interrupt on the next timer 242 * since interrupts are rare and we'll just get the interrupt on the next timer
@@ -368,8 +382,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
368 case 1: /* Basic feature request. */ 382 case 1: /* Basic feature request. */
369 /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */ 383 /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
370 *cx &= 0x00002201; 384 *cx &= 0x00002201;
371 /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */ 385 /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */
372 *dx &= 0x07808111; 386 *dx &= 0x07808151;
373 /* The Host can do a nice optimization if it knows that the 387 /* The Host can do a nice optimization if it knows that the
374 * kernel mappings (addresses above 0xC0000000 or whatever 388 * kernel mappings (addresses above 0xC0000000 or whatever
375 * PAGE_OFFSET is set to) haven't changed. But Linux calls 389 * PAGE_OFFSET is set to) haven't changed. But Linux calls
@@ -388,6 +402,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
388 if (*ax > 0x80000008) 402 if (*ax > 0x80000008)
389 *ax = 0x80000008; 403 *ax = 0x80000008;
390 break; 404 break;
405 case 0x80000001:
406 /* Here we should fix nx cap depending on host. */
407 /* For this version of PAE, we just clear NX bit. */
408 *dx &= ~(1 << 20);
409 break;
391 } 410 }
392} 411}
393 412
@@ -521,25 +540,52 @@ static void lguest_write_cr4(unsigned long val)
521static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, 540static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
522 pte_t *ptep) 541 pte_t *ptep)
523{ 542{
543#ifdef CONFIG_X86_PAE
544 lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
545 ptep->pte_low, ptep->pte_high);
546#else
524 lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); 547 lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
548#endif
525} 549}
526 550
527static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, 551static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
528 pte_t *ptep, pte_t pteval) 552 pte_t *ptep, pte_t pteval)
529{ 553{
530 *ptep = pteval; 554 native_set_pte(ptep, pteval);
531 lguest_pte_update(mm, addr, ptep); 555 lguest_pte_update(mm, addr, ptep);
532} 556}
533 557
534/* The Guest calls this to set a top-level entry. Again, we set the entry then 558/* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
535 * tell the Host which top-level page we changed, and the index of the entry we 559 * to set a middle-level entry when PAE is activated.
536 * changed. */ 560 * Again, we set the entry then tell the Host which page we changed,
561 * and the index of the entry we changed. */
562#ifdef CONFIG_X86_PAE
563static void lguest_set_pud(pud_t *pudp, pud_t pudval)
564{
565 native_set_pud(pudp, pudval);
566
567 /* 32 bytes aligned pdpt address and the index. */
568 lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
569 (__pa(pudp) & 0x1F) / sizeof(pud_t));
570}
571
537static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) 572static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
538{ 573{
539 *pmdp = pmdval; 574 native_set_pmd(pmdp, pmdval);
540 lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK, 575 lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
541 (__pa(pmdp) & (PAGE_SIZE - 1)) / 4); 576 (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
542} 577}
578#else
579
580/* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not
581 * activated. */
582static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
583{
584 native_set_pmd(pmdp, pmdval);
585 lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
586 (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
587}
588#endif
543 589
544/* There are a couple of legacy places where the kernel sets a PTE, but we 590/* There are a couple of legacy places where the kernel sets a PTE, but we
545 * don't know the top level any more. This is useless for us, since we don't 591 * don't know the top level any more. This is useless for us, since we don't
@@ -552,11 +598,31 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
552 * which brings boot back to 0.25 seconds. */ 598 * which brings boot back to 0.25 seconds. */
553static void lguest_set_pte(pte_t *ptep, pte_t pteval) 599static void lguest_set_pte(pte_t *ptep, pte_t pteval)
554{ 600{
555 *ptep = pteval; 601 native_set_pte(ptep, pteval);
602 if (cr3_changed)
603 lazy_hcall1(LHCALL_FLUSH_TLB, 1);
604}
605
606#ifdef CONFIG_X86_PAE
607static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
608{
609 native_set_pte_atomic(ptep, pte);
556 if (cr3_changed) 610 if (cr3_changed)
557 lazy_hcall1(LHCALL_FLUSH_TLB, 1); 611 lazy_hcall1(LHCALL_FLUSH_TLB, 1);
558} 612}
559 613
614void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
615{
616 native_pte_clear(mm, addr, ptep);
617 lguest_pte_update(mm, addr, ptep);
618}
619
620void lguest_pmd_clear(pmd_t *pmdp)
621{
622 lguest_set_pmd(pmdp, __pmd(0));
623}
624#endif
625
560/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on 626/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
561 * native page table operations. On native hardware you can set a new page 627 * native page table operations. On native hardware you can set a new page
562 * table entry whenever you want, but if you want to remove one you have to do 628 * table entry whenever you want, but if you want to remove one you have to do
@@ -628,13 +694,12 @@ static void __init lguest_init_IRQ(void)
628{ 694{
629 unsigned int i; 695 unsigned int i;
630 696
631 for (i = 0; i < LGUEST_IRQS; i++) { 697 for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
632 int vector = FIRST_EXTERNAL_VECTOR + i;
633 /* Some systems map "vectors" to interrupts weirdly. Lguest has 698 /* Some systems map "vectors" to interrupts weirdly. Lguest has
634 * a straightforward 1 to 1 mapping, so force that here. */ 699 * a straightforward 1 to 1 mapping, so force that here. */
635 __get_cpu_var(vector_irq)[vector] = i; 700 __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
636 if (vector != SYSCALL_VECTOR) 701 if (i != SYSCALL_VECTOR)
637 set_intr_gate(vector, interrupt[i]); 702 set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
638 } 703 }
639 /* This call is required to set up for 4k stacks, where we have 704 /* This call is required to set up for 4k stacks, where we have
640 * separate stacks for hard and soft interrupts. */ 705 * separate stacks for hard and soft interrupts. */
@@ -973,10 +1038,10 @@ static void lguest_restart(char *reason)
973 * 1038 *
974 * Our current solution is to allow the paravirt back end to optionally patch 1039 * Our current solution is to allow the paravirt back end to optionally patch
975 * over the indirect calls to replace them with something more efficient. We 1040 * over the indirect calls to replace them with something more efficient. We
976 * patch the four most commonly called functions: disable interrupts, enable 1041 * patch two of the simplest of the most commonly called functions: disable
977 * interrupts, restore interrupts and save interrupts. We usually have 6 or 10 1042 * interrupts and save interrupts. We usually have 6 or 10 bytes to patch
978 * bytes to patch into: the Guest versions of these operations are small enough 1043 * into: the Guest versions of these operations are small enough that we can
979 * that we can fit comfortably. 1044 * fit comfortably.
980 * 1045 *
981 * First we need assembly templates of each of the patchable Guest operations, 1046 * First we need assembly templates of each of the patchable Guest operations,
982 * and these are in i386_head.S. */ 1047 * and these are in i386_head.S. */
@@ -987,8 +1052,6 @@ static const struct lguest_insns
987 const char *start, *end; 1052 const char *start, *end;
988} lguest_insns[] = { 1053} lguest_insns[] = {
989 [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli }, 1054 [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
990 [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
991 [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
992 [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, 1055 [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
993}; 1056};
994 1057
@@ -1026,6 +1089,7 @@ __init void lguest_init(void)
1026 pv_info.name = "lguest"; 1089 pv_info.name = "lguest";
1027 pv_info.paravirt_enabled = 1; 1090 pv_info.paravirt_enabled = 1;
1028 pv_info.kernel_rpl = 1; 1091 pv_info.kernel_rpl = 1;
1092 pv_info.shared_kernel_pmd = 1;
1029 1093
1030 /* We set up all the lguest overrides for sensitive operations. These 1094 /* We set up all the lguest overrides for sensitive operations. These
1031 * are detailed with the operations themselves. */ 1095 * are detailed with the operations themselves. */
@@ -1033,9 +1097,9 @@ __init void lguest_init(void)
1033 /* interrupt-related operations */ 1097 /* interrupt-related operations */
1034 pv_irq_ops.init_IRQ = lguest_init_IRQ; 1098 pv_irq_ops.init_IRQ = lguest_init_IRQ;
1035 pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); 1099 pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
1036 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl); 1100 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
1037 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable); 1101 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
1038 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable); 1102 pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
1039 pv_irq_ops.safe_halt = lguest_safe_halt; 1103 pv_irq_ops.safe_halt = lguest_safe_halt;
1040 1104
1041 /* init-time operations */ 1105 /* init-time operations */
@@ -1071,6 +1135,12 @@ __init void lguest_init(void)
1071 pv_mmu_ops.set_pte = lguest_set_pte; 1135 pv_mmu_ops.set_pte = lguest_set_pte;
1072 pv_mmu_ops.set_pte_at = lguest_set_pte_at; 1136 pv_mmu_ops.set_pte_at = lguest_set_pte_at;
1073 pv_mmu_ops.set_pmd = lguest_set_pmd; 1137 pv_mmu_ops.set_pmd = lguest_set_pmd;
1138#ifdef CONFIG_X86_PAE
1139 pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
1140 pv_mmu_ops.pte_clear = lguest_pte_clear;
1141 pv_mmu_ops.pmd_clear = lguest_pmd_clear;
1142 pv_mmu_ops.set_pud = lguest_set_pud;
1143#endif
1074 pv_mmu_ops.read_cr2 = lguest_read_cr2; 1144 pv_mmu_ops.read_cr2 = lguest_read_cr2;
1075 pv_mmu_ops.read_cr3 = lguest_read_cr3; 1145 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1076 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; 1146 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index f79541989471..a9c8cfe61cd4 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -46,10 +46,64 @@ ENTRY(lguest_entry)
46 .globl lgstart_##name; .globl lgend_##name 46 .globl lgstart_##name; .globl lgend_##name
47 47
48LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) 48LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
49LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
50LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
51LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) 49LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
52/*:*/ 50
51/*G:033 But using those wrappers is inefficient (we'll see why that doesn't
52 * matter for save_fl and irq_disable later). If we write our routines
53 * carefully in assembler, we can avoid clobbering any registers and avoid
54 * jumping through the wrapper functions.
55 *
56 * I skipped over our first piece of assembler, but this one is worth studying
57 * in a bit more detail so I'll describe in easy stages. First, the routine
58 * to enable interrupts: */
59ENTRY(lg_irq_enable)
60 /* The reverse of irq_disable, this sets lguest_data.irq_enabled to
61 * X86_EFLAGS_IF (ie. "Interrupts enabled"). */
62 movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
63 /* But now we need to check if the Host wants to know: there might have
64 * been interrupts waiting to be delivered, in which case it will have
65 * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
66 * jump to send_interrupts, otherwise we're done. */
67 testl $0, lguest_data+LGUEST_DATA_irq_pending
68 jnz send_interrupts
69 /* One cool thing about x86 is that you can do many things without using
70 * a register. In this case, the normal path hasn't needed to save or
71 * restore any registers at all! */
72 ret
73send_interrupts:
74 /* OK, now we need a register: eax is used for the hypercall number,
75 * which is LHCALL_SEND_INTERRUPTS.
76 *
77 * We used not to bother with this pending detection at all, which was
78 * much simpler. Sooner or later the Host would realize it had to
79 * send us an interrupt. But that turns out to make performance 7
80 * times worse on a simple tcp benchmark. So now we do this the hard
81 * way. */
82 pushl %eax
83 movl $LHCALL_SEND_INTERRUPTS, %eax
84 /* This is a vmcall instruction (same thing that KVM uses). Older
85 * assembler versions might not know the "vmcall" instruction, so we
86 * create one manually here. */
87 .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
88 popl %eax
89 ret
90
91/* Finally, the "popf" or "restore flags" routine. The %eax register holds the
92 * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
93 * enabling interrupts again, if it's 0 we're leaving them off. */
94ENTRY(lg_restore_fl)
95 /* This is just "lguest_data.irq_enabled = flags;" */
96 movl %eax, lguest_data+LGUEST_DATA_irq_enabled
97 /* Now, if the %eax value has enabled interrupts and
98 * lguest_data.irq_pending is set, we want to tell the Host so it can
99 * deliver any outstanding interrupts. Fortunately, both values will
100 * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
101 * instruction will AND them together for us. If both are set, we
102 * jump to send_interrupts. */
103 testl lguest_data+LGUEST_DATA_irq_pending, %eax
104 jnz send_interrupts
105 /* Again, the normal path has used no extra registers. Clever, huh? */
106 ret
53 107
54/* These demark the EIP range where host should never deliver interrupts. */ 108/* These demark the EIP range where host should never deliver interrupts. */
55.global lguest_noirq_start 109.global lguest_noirq_start
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 949708d7a481..9ff3c0816d15 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -564,7 +564,7 @@ static inline void save_pg_dir(void)
564} 564}
565#endif /* !CONFIG_ACPI_SLEEP */ 565#endif /* !CONFIG_ACPI_SLEEP */
566 566
567void zap_low_mappings(void) 567void zap_low_mappings(bool early)
568{ 568{
569 int i; 569 int i;
570 570
@@ -581,7 +581,11 @@ void zap_low_mappings(void)
581 set_pgd(swapper_pg_dir+i, __pgd(0)); 581 set_pgd(swapper_pg_dir+i, __pgd(0));
582#endif 582#endif
583 } 583 }
584 flush_tlb_all(); 584
585 if (early)
586 __flush_tlb();
587 else
588 flush_tlb_all();
585} 589}
586 590
587pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); 591pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
@@ -956,7 +960,7 @@ void __init mem_init(void)
956 test_wp_bit(); 960 test_wp_bit();
957 961
958 save_pg_dir(); 962 save_pg_dir();
959 zap_low_mappings(); 963 zap_low_mappings(true);
960} 964}
961 965
962#ifdef CONFIG_MEMORY_HOTPLUG 966#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index 58b32db33125..de2abbd07544 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -3,5 +3,5 @@
3nostackp := $(call cc-option, -fno-stack-protector) 3nostackp := $(call cc-option, -fno-stack-protector)
4CFLAGS_cpu_$(BITS).o := $(nostackp) 4CFLAGS_cpu_$(BITS).o := $(nostackp)
5 5
6obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o 6obj-$(CONFIG_PM_SLEEP) += cpu.o
7obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o 7obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu.c
index 5343540f2607..d277ef1eea51 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Suspend and hibernation support for x86-64 2 * Suspend support specific for i386/x86-64.
3 * 3 *
4 * Distribute under GPLv2 4 * Distribute under GPLv2
5 * 5 *
@@ -8,18 +8,28 @@
8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
9 */ 9 */
10 10
11#include <linux/smp.h>
12#include <linux/suspend.h> 11#include <linux/suspend.h>
13#include <asm/proto.h> 12#include <linux/smp.h>
14#include <asm/page.h> 13
15#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/proto.h>
16#include <asm/mtrr.h> 16#include <asm/mtrr.h>
17#include <asm/page.h>
18#include <asm/mce.h>
17#include <asm/xcr.h> 19#include <asm/xcr.h>
18#include <asm/suspend.h> 20#include <asm/suspend.h>
19 21
20static void fix_processor_context(void); 22#ifdef CONFIG_X86_32
23static struct saved_context saved_context;
21 24
25unsigned long saved_context_ebx;
26unsigned long saved_context_esp, saved_context_ebp;
27unsigned long saved_context_esi, saved_context_edi;
28unsigned long saved_context_eflags;
29#else
30/* CONFIG_X86_64 */
22struct saved_context saved_context; 31struct saved_context saved_context;
32#endif
23 33
24/** 34/**
25 * __save_processor_state - save CPU registers before creating a 35 * __save_processor_state - save CPU registers before creating a
@@ -38,19 +48,35 @@ struct saved_context saved_context;
38 */ 48 */
39static void __save_processor_state(struct saved_context *ctxt) 49static void __save_processor_state(struct saved_context *ctxt)
40{ 50{
51#ifdef CONFIG_X86_32
52 mtrr_save_fixed_ranges(NULL);
53#endif
41 kernel_fpu_begin(); 54 kernel_fpu_begin();
42 55
43 /* 56 /*
44 * descriptor tables 57 * descriptor tables
45 */ 58 */
59#ifdef CONFIG_X86_32
60 store_gdt(&ctxt->gdt);
61 store_idt(&ctxt->idt);
62#else
63/* CONFIG_X86_64 */
46 store_gdt((struct desc_ptr *)&ctxt->gdt_limit); 64 store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
47 store_idt((struct desc_ptr *)&ctxt->idt_limit); 65 store_idt((struct desc_ptr *)&ctxt->idt_limit);
66#endif
48 store_tr(ctxt->tr); 67 store_tr(ctxt->tr);
49 68
50 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ 69 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
51 /* 70 /*
52 * segment registers 71 * segment registers
53 */ 72 */
73#ifdef CONFIG_X86_32
74 savesegment(es, ctxt->es);
75 savesegment(fs, ctxt->fs);
76 savesegment(gs, ctxt->gs);
77 savesegment(ss, ctxt->ss);
78#else
79/* CONFIG_X86_64 */
54 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); 80 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
55 asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); 81 asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
56 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); 82 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
@@ -62,30 +88,87 @@ static void __save_processor_state(struct saved_context *ctxt)
62 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 88 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
63 mtrr_save_fixed_ranges(NULL); 89 mtrr_save_fixed_ranges(NULL);
64 90
91 rdmsrl(MSR_EFER, ctxt->efer);
92#endif
93
65 /* 94 /*
66 * control registers 95 * control registers
67 */ 96 */
68 rdmsrl(MSR_EFER, ctxt->efer);
69 ctxt->cr0 = read_cr0(); 97 ctxt->cr0 = read_cr0();
70 ctxt->cr2 = read_cr2(); 98 ctxt->cr2 = read_cr2();
71 ctxt->cr3 = read_cr3(); 99 ctxt->cr3 = read_cr3();
100#ifdef CONFIG_X86_32
101 ctxt->cr4 = read_cr4_safe();
102#else
103/* CONFIG_X86_64 */
72 ctxt->cr4 = read_cr4(); 104 ctxt->cr4 = read_cr4();
73 ctxt->cr8 = read_cr8(); 105 ctxt->cr8 = read_cr8();
106#endif
74} 107}
75 108
109/* Needed by apm.c */
76void save_processor_state(void) 110void save_processor_state(void)
77{ 111{
78 __save_processor_state(&saved_context); 112 __save_processor_state(&saved_context);
79} 113}
114#ifdef CONFIG_X86_32
115EXPORT_SYMBOL(save_processor_state);
116#endif
80 117
81static void do_fpu_end(void) 118static void do_fpu_end(void)
82{ 119{
83 /* 120 /*
84 * Restore FPU regs if necessary 121 * Restore FPU regs if necessary.
85 */ 122 */
86 kernel_fpu_end(); 123 kernel_fpu_end();
87} 124}
88 125
126static void fix_processor_context(void)
127{
128 int cpu = smp_processor_id();
129 struct tss_struct *t = &per_cpu(init_tss, cpu);
130
131 set_tss_desc(cpu, t); /*
132 * This just modifies memory; should not be
133 * necessary. But... This is necessary, because
134 * 386 hardware has concept of busy TSS or some
135 * similar stupidity.
136 */
137
138#ifdef CONFIG_X86_64
139 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
140
141 syscall_init(); /* This sets MSR_*STAR and related */
142#endif
143 load_TR_desc(); /* This does ltr */
144 load_LDT(&current->active_mm->context); /* This does lldt */
145
146 /*
147 * Now maybe reload the debug registers
148 */
149 if (current->thread.debugreg7) {
150#ifdef CONFIG_X86_32
151 set_debugreg(current->thread.debugreg0, 0);
152 set_debugreg(current->thread.debugreg1, 1);
153 set_debugreg(current->thread.debugreg2, 2);
154 set_debugreg(current->thread.debugreg3, 3);
155 /* no 4 and 5 */
156 set_debugreg(current->thread.debugreg6, 6);
157 set_debugreg(current->thread.debugreg7, 7);
158#else
159 /* CONFIG_X86_64 */
160 loaddebug(&current->thread, 0);
161 loaddebug(&current->thread, 1);
162 loaddebug(&current->thread, 2);
163 loaddebug(&current->thread, 3);
164 /* no 4 and 5 */
165 loaddebug(&current->thread, 6);
166 loaddebug(&current->thread, 7);
167#endif
168 }
169
170}
171
89/** 172/**
90 * __restore_processor_state - restore the contents of CPU registers saved 173 * __restore_processor_state - restore the contents of CPU registers saved
91 * by __save_processor_state() 174 * by __save_processor_state()
@@ -96,9 +179,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
96 /* 179 /*
97 * control registers 180 * control registers
98 */ 181 */
182 /* cr4 was introduced in the Pentium CPU */
183#ifdef CONFIG_X86_32
184 if (ctxt->cr4)
185 write_cr4(ctxt->cr4);
186#else
187/* CONFIG X86_64 */
99 wrmsrl(MSR_EFER, ctxt->efer); 188 wrmsrl(MSR_EFER, ctxt->efer);
100 write_cr8(ctxt->cr8); 189 write_cr8(ctxt->cr8);
101 write_cr4(ctxt->cr4); 190 write_cr4(ctxt->cr4);
191#endif
102 write_cr3(ctxt->cr3); 192 write_cr3(ctxt->cr3);
103 write_cr2(ctxt->cr2); 193 write_cr2(ctxt->cr2);
104 write_cr0(ctxt->cr0); 194 write_cr0(ctxt->cr0);
@@ -107,13 +197,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
107 * now restore the descriptor tables to their proper values 197 * now restore the descriptor tables to their proper values
108 * ltr is done i fix_processor_context(). 198 * ltr is done i fix_processor_context().
109 */ 199 */
200#ifdef CONFIG_X86_32
201 load_gdt(&ctxt->gdt);
202 load_idt(&ctxt->idt);
203#else
204/* CONFIG_X86_64 */
110 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit); 205 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
111 load_idt((const struct desc_ptr *)&ctxt->idt_limit); 206 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
112 207#endif
113 208
114 /* 209 /*
115 * segment registers 210 * segment registers
116 */ 211 */
212#ifdef CONFIG_X86_32
213 loadsegment(es, ctxt->es);
214 loadsegment(fs, ctxt->fs);
215 loadsegment(gs, ctxt->gs);
216 loadsegment(ss, ctxt->ss);
217
218 /*
219 * sysenter MSRs
220 */
221 if (boot_cpu_has(X86_FEATURE_SEP))
222 enable_sep_cpu();
223#else
224/* CONFIG_X86_64 */
117 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); 225 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
118 asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); 226 asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
119 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); 227 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
@@ -123,6 +231,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
123 wrmsrl(MSR_FS_BASE, ctxt->fs_base); 231 wrmsrl(MSR_FS_BASE, ctxt->fs_base);
124 wrmsrl(MSR_GS_BASE, ctxt->gs_base); 232 wrmsrl(MSR_GS_BASE, ctxt->gs_base);
125 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 233 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
234#endif
126 235
127 /* 236 /*
128 * restore XCR0 for xsave capable cpu's. 237 * restore XCR0 for xsave capable cpu's.
@@ -134,41 +243,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
134 243
135 do_fpu_end(); 244 do_fpu_end();
136 mtrr_ap_init(); 245 mtrr_ap_init();
246
247#ifdef CONFIG_X86_32
248 mcheck_init(&boot_cpu_data);
249#endif
137} 250}
138 251
252/* Needed by apm.c */
139void restore_processor_state(void) 253void restore_processor_state(void)
140{ 254{
141 __restore_processor_state(&saved_context); 255 __restore_processor_state(&saved_context);
142} 256}
143 257#ifdef CONFIG_X86_32
144static void fix_processor_context(void) 258EXPORT_SYMBOL(restore_processor_state);
145{ 259#endif
146 int cpu = smp_processor_id();
147 struct tss_struct *t = &per_cpu(init_tss, cpu);
148
149 /*
150 * This just modifies memory; should not be necessary. But... This
151 * is necessary, because 386 hardware has concept of busy TSS or some
152 * similar stupidity.
153 */
154 set_tss_desc(cpu, t);
155
156 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
157
158 syscall_init(); /* This sets MSR_*STAR and related */
159 load_TR_desc(); /* This does ltr */
160 load_LDT(&current->active_mm->context); /* This does lldt */
161
162 /*
163 * Now maybe reload the debug registers
164 */
165 if (current->thread.debugreg7){
166 loaddebug(&current->thread, 0);
167 loaddebug(&current->thread, 1);
168 loaddebug(&current->thread, 2);
169 loaddebug(&current->thread, 3);
170 /* no 4 and 5 */
171 loaddebug(&current->thread, 6);
172 loaddebug(&current->thread, 7);
173 }
174}
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
deleted file mode 100644
index ce702c5b3a2c..000000000000
--- a/arch/x86/power/cpu_32.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * Suspend support specific for i386.
3 *
4 * Distribute under GPLv2
5 *
6 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
8 */
9
10#include <linux/module.h>
11#include <linux/suspend.h>
12#include <asm/mtrr.h>
13#include <asm/mce.h>
14#include <asm/xcr.h>
15#include <asm/suspend.h>
16
17static struct saved_context saved_context;
18
19unsigned long saved_context_ebx;
20unsigned long saved_context_esp, saved_context_ebp;
21unsigned long saved_context_esi, saved_context_edi;
22unsigned long saved_context_eflags;
23
24static void __save_processor_state(struct saved_context *ctxt)
25{
26 mtrr_save_fixed_ranges(NULL);
27 kernel_fpu_begin();
28
29 /*
30 * descriptor tables
31 */
32 store_gdt(&ctxt->gdt);
33 store_idt(&ctxt->idt);
34 store_tr(ctxt->tr);
35
36 /*
37 * segment registers
38 */
39 savesegment(es, ctxt->es);
40 savesegment(fs, ctxt->fs);
41 savesegment(gs, ctxt->gs);
42 savesegment(ss, ctxt->ss);
43
44 /*
45 * control registers
46 */
47 ctxt->cr0 = read_cr0();
48 ctxt->cr2 = read_cr2();
49 ctxt->cr3 = read_cr3();
50 ctxt->cr4 = read_cr4_safe();
51}
52
53/* Needed by apm.c */
54void save_processor_state(void)
55{
56 __save_processor_state(&saved_context);
57}
58EXPORT_SYMBOL(save_processor_state);
59
60static void do_fpu_end(void)
61{
62 /*
63 * Restore FPU regs if necessary.
64 */
65 kernel_fpu_end();
66}
67
68static void fix_processor_context(void)
69{
70 int cpu = smp_processor_id();
71 struct tss_struct *t = &per_cpu(init_tss, cpu);
72
73 set_tss_desc(cpu, t); /*
74 * This just modifies memory; should not be
75 * necessary. But... This is necessary, because
76 * 386 hardware has concept of busy TSS or some
77 * similar stupidity.
78 */
79
80 load_TR_desc(); /* This does ltr */
81 load_LDT(&current->active_mm->context); /* This does lldt */
82
83 /*
84 * Now maybe reload the debug registers
85 */
86 if (current->thread.debugreg7) {
87 set_debugreg(current->thread.debugreg0, 0);
88 set_debugreg(current->thread.debugreg1, 1);
89 set_debugreg(current->thread.debugreg2, 2);
90 set_debugreg(current->thread.debugreg3, 3);
91 /* no 4 and 5 */
92 set_debugreg(current->thread.debugreg6, 6);
93 set_debugreg(current->thread.debugreg7, 7);
94 }
95
96}
97
98static void __restore_processor_state(struct saved_context *ctxt)
99{
100 /*
101 * control registers
102 */
103 /* cr4 was introduced in the Pentium CPU */
104 if (ctxt->cr4)
105 write_cr4(ctxt->cr4);
106 write_cr3(ctxt->cr3);
107 write_cr2(ctxt->cr2);
108 write_cr0(ctxt->cr0);
109
110 /*
111 * now restore the descriptor tables to their proper values
112 * ltr is done i fix_processor_context().
113 */
114 load_gdt(&ctxt->gdt);
115 load_idt(&ctxt->idt);
116
117 /*
118 * segment registers
119 */
120 loadsegment(es, ctxt->es);
121 loadsegment(fs, ctxt->fs);
122 loadsegment(gs, ctxt->gs);
123 loadsegment(ss, ctxt->ss);
124
125 /*
126 * sysenter MSRs
127 */
128 if (boot_cpu_has(X86_FEATURE_SEP))
129 enable_sep_cpu();
130
131 /*
132 * restore XCR0 for xsave capable cpu's.
133 */
134 if (cpu_has_xsave)
135 xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
136
137 fix_processor_context();
138 do_fpu_end();
139 mtrr_ap_init();
140 mcheck_init(&boot_cpu_data);
141}
142
143/* Needed by apm.c */
144void restore_processor_state(void)
145{
146 __restore_processor_state(&saved_context);
147}
148EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index 3981a466c779..c1accea8cb56 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -34,8 +34,6 @@ void *module_alloc(unsigned long size)
34void module_free(struct module *mod, void *module_region) 34void module_free(struct module *mod, void *module_region)
35{ 35{
36 vfree(module_region); 36 vfree(module_region);
37 /* FIXME: If module_region == mod->init_region, trim exception
38 table entries. */
39} 37}
40 38
41int module_frob_arch_sections(Elf32_Ehdr *hdr, 39int module_frob_arch_sections(Elf32_Ehdr *hdr,
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d3a59c688fe4..8a267c427629 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -17,7 +17,7 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20 20#include <linux/highmem.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include "base.h" 22#include "base.h"
23 23
@@ -45,7 +45,10 @@ struct firmware_priv {
45 struct bin_attribute attr_data; 45 struct bin_attribute attr_data;
46 struct firmware *fw; 46 struct firmware *fw;
47 unsigned long status; 47 unsigned long status;
48 int alloc_size; 48 struct page **pages;
49 int nr_pages;
50 int page_array_size;
51 const char *vdata;
49 struct timer_list timeout; 52 struct timer_list timeout;
50}; 53};
51 54
@@ -122,6 +125,10 @@ static ssize_t firmware_loading_show(struct device *dev,
122 return sprintf(buf, "%d\n", loading); 125 return sprintf(buf, "%d\n", loading);
123} 126}
124 127
128/* Some architectures don't have PAGE_KERNEL_RO */
129#ifndef PAGE_KERNEL_RO
130#define PAGE_KERNEL_RO PAGE_KERNEL
131#endif
125/** 132/**
126 * firmware_loading_store - set value in the 'loading' control file 133 * firmware_loading_store - set value in the 'loading' control file
127 * @dev: device pointer 134 * @dev: device pointer
@@ -141,6 +148,7 @@ static ssize_t firmware_loading_store(struct device *dev,
141{ 148{
142 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 149 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
143 int loading = simple_strtol(buf, NULL, 10); 150 int loading = simple_strtol(buf, NULL, 10);
151 int i;
144 152
145 switch (loading) { 153 switch (loading) {
146 case 1: 154 case 1:
@@ -151,13 +159,30 @@ static ssize_t firmware_loading_store(struct device *dev,
151 } 159 }
152 vfree(fw_priv->fw->data); 160 vfree(fw_priv->fw->data);
153 fw_priv->fw->data = NULL; 161 fw_priv->fw->data = NULL;
162 for (i = 0; i < fw_priv->nr_pages; i++)
163 __free_page(fw_priv->pages[i]);
164 kfree(fw_priv->pages);
165 fw_priv->pages = NULL;
166 fw_priv->page_array_size = 0;
167 fw_priv->nr_pages = 0;
154 fw_priv->fw->size = 0; 168 fw_priv->fw->size = 0;
155 fw_priv->alloc_size = 0;
156 set_bit(FW_STATUS_LOADING, &fw_priv->status); 169 set_bit(FW_STATUS_LOADING, &fw_priv->status);
157 mutex_unlock(&fw_lock); 170 mutex_unlock(&fw_lock);
158 break; 171 break;
159 case 0: 172 case 0:
160 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { 173 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
174 vfree(fw_priv->fw->data);
175 fw_priv->fw->data = vmap(fw_priv->pages,
176 fw_priv->nr_pages,
177 0, PAGE_KERNEL_RO);
178 if (!fw_priv->fw->data) {
179 dev_err(dev, "%s: vmap() failed\n", __func__);
180 goto err;
181 }
182 /* Pages will be freed by vfree() */
183 fw_priv->pages = NULL;
184 fw_priv->page_array_size = 0;
185 fw_priv->nr_pages = 0;
161 complete(&fw_priv->completion); 186 complete(&fw_priv->completion);
162 clear_bit(FW_STATUS_LOADING, &fw_priv->status); 187 clear_bit(FW_STATUS_LOADING, &fw_priv->status);
163 break; 188 break;
@@ -167,6 +192,7 @@ static ssize_t firmware_loading_store(struct device *dev,
167 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); 192 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
168 /* fallthrough */ 193 /* fallthrough */
169 case -1: 194 case -1:
195 err:
170 fw_load_abort(fw_priv); 196 fw_load_abort(fw_priv);
171 break; 197 break;
172 } 198 }
@@ -191,8 +217,28 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
191 ret_count = -ENODEV; 217 ret_count = -ENODEV;
192 goto out; 218 goto out;
193 } 219 }
194 ret_count = memory_read_from_buffer(buffer, count, &offset, 220 if (offset > fw->size)
195 fw->data, fw->size); 221 return 0;
222 if (count > fw->size - offset)
223 count = fw->size - offset;
224
225 ret_count = count;
226
227 while (count) {
228 void *page_data;
229 int page_nr = offset >> PAGE_SHIFT;
230 int page_ofs = offset & (PAGE_SIZE-1);
231 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
232
233 page_data = kmap(fw_priv->pages[page_nr]);
234
235 memcpy(buffer, page_data + page_ofs, page_cnt);
236
237 kunmap(fw_priv->pages[page_nr]);
238 buffer += page_cnt;
239 offset += page_cnt;
240 count -= page_cnt;
241 }
196out: 242out:
197 mutex_unlock(&fw_lock); 243 mutex_unlock(&fw_lock);
198 return ret_count; 244 return ret_count;
@@ -201,27 +247,39 @@ out:
201static int 247static int
202fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 248fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
203{ 249{
204 u8 *new_data; 250 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
205 int new_size = fw_priv->alloc_size; 251
252 /* If the array of pages is too small, grow it... */
253 if (fw_priv->page_array_size < pages_needed) {
254 int new_array_size = max(pages_needed,
255 fw_priv->page_array_size * 2);
256 struct page **new_pages;
257
258 new_pages = kmalloc(new_array_size * sizeof(void *),
259 GFP_KERNEL);
260 if (!new_pages) {
261 fw_load_abort(fw_priv);
262 return -ENOMEM;
263 }
264 memcpy(new_pages, fw_priv->pages,
265 fw_priv->page_array_size * sizeof(void *));
266 memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
267 (new_array_size - fw_priv->page_array_size));
268 kfree(fw_priv->pages);
269 fw_priv->pages = new_pages;
270 fw_priv->page_array_size = new_array_size;
271 }
206 272
207 if (min_size <= fw_priv->alloc_size) 273 while (fw_priv->nr_pages < pages_needed) {
208 return 0; 274 fw_priv->pages[fw_priv->nr_pages] =
275 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
209 276
210 new_size = ALIGN(min_size, PAGE_SIZE); 277 if (!fw_priv->pages[fw_priv->nr_pages]) {
211 new_data = vmalloc(new_size); 278 fw_load_abort(fw_priv);
212 if (!new_data) { 279 return -ENOMEM;
213 printk(KERN_ERR "%s: unable to alloc buffer\n", __func__); 280 }
214 /* Make sure that we don't keep incomplete data */ 281 fw_priv->nr_pages++;
215 fw_load_abort(fw_priv);
216 return -ENOMEM;
217 }
218 fw_priv->alloc_size = new_size;
219 if (fw_priv->fw->data) {
220 memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size);
221 vfree(fw_priv->fw->data);
222 } 282 }
223 fw_priv->fw->data = new_data;
224 BUG_ON(min_size > fw_priv->alloc_size);
225 return 0; 283 return 0;
226} 284}
227 285
@@ -258,10 +316,25 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
258 if (retval) 316 if (retval)
259 goto out; 317 goto out;
260 318
261 memcpy((u8 *)fw->data + offset, buffer, count);
262
263 fw->size = max_t(size_t, offset + count, fw->size);
264 retval = count; 319 retval = count;
320
321 while (count) {
322 void *page_data;
323 int page_nr = offset >> PAGE_SHIFT;
324 int page_ofs = offset & (PAGE_SIZE - 1);
325 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
326
327 page_data = kmap(fw_priv->pages[page_nr]);
328
329 memcpy(page_data + page_ofs, buffer, page_cnt);
330
331 kunmap(fw_priv->pages[page_nr]);
332 buffer += page_cnt;
333 offset += page_cnt;
334 count -= page_cnt;
335 }
336
337 fw->size = max_t(size_t, offset, fw->size);
265out: 338out:
266 mutex_unlock(&fw_lock); 339 mutex_unlock(&fw_lock);
267 return retval; 340 return retval;
@@ -277,7 +350,11 @@ static struct bin_attribute firmware_attr_data_tmpl = {
277static void fw_dev_release(struct device *dev) 350static void fw_dev_release(struct device *dev)
278{ 351{
279 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 352 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
353 int i;
280 354
355 for (i = 0; i < fw_priv->nr_pages; i++)
356 __free_page(fw_priv->pages[i]);
357 kfree(fw_priv->pages);
281 kfree(fw_priv); 358 kfree(fw_priv);
282 kfree(dev); 359 kfree(dev);
283 360
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8b4708e06244..ead3f64c41d0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -469,22 +469,6 @@ static void platform_drv_shutdown(struct device *_dev)
469 drv->shutdown(dev); 469 drv->shutdown(dev);
470} 470}
471 471
472static int platform_drv_suspend(struct device *_dev, pm_message_t state)
473{
474 struct platform_driver *drv = to_platform_driver(_dev->driver);
475 struct platform_device *dev = to_platform_device(_dev);
476
477 return drv->suspend(dev, state);
478}
479
480static int platform_drv_resume(struct device *_dev)
481{
482 struct platform_driver *drv = to_platform_driver(_dev->driver);
483 struct platform_device *dev = to_platform_device(_dev);
484
485 return drv->resume(dev);
486}
487
488/** 472/**
489 * platform_driver_register 473 * platform_driver_register
490 * @drv: platform driver structure 474 * @drv: platform driver structure
@@ -498,10 +482,10 @@ int platform_driver_register(struct platform_driver *drv)
498 drv->driver.remove = platform_drv_remove; 482 drv->driver.remove = platform_drv_remove;
499 if (drv->shutdown) 483 if (drv->shutdown)
500 drv->driver.shutdown = platform_drv_shutdown; 484 drv->driver.shutdown = platform_drv_shutdown;
501 if (drv->suspend) 485 if (drv->suspend || drv->resume)
502 drv->driver.suspend = platform_drv_suspend; 486 pr_warning("Platform driver '%s' needs updating - please use "
503 if (drv->resume) 487 "dev_pm_ops\n", drv->driver.name);
504 drv->driver.resume = platform_drv_resume; 488
505 return driver_register(&drv->driver); 489 return driver_register(&drv->driver);
506} 490}
507EXPORT_SYMBOL_GPL(platform_driver_register); 491EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -633,10 +617,12 @@ static int platform_match(struct device *dev, struct device_driver *drv)
633 617
634static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 618static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
635{ 619{
620 struct platform_driver *pdrv = to_platform_driver(dev->driver);
621 struct platform_device *pdev = to_platform_device(dev);
636 int ret = 0; 622 int ret = 0;
637 623
638 if (dev->driver && dev->driver->suspend) 624 if (dev->driver && pdrv->suspend)
639 ret = dev->driver->suspend(dev, mesg); 625 ret = pdrv->suspend(pdev, mesg);
640 626
641 return ret; 627 return ret;
642} 628}
@@ -667,10 +653,12 @@ static int platform_legacy_resume_early(struct device *dev)
667 653
668static int platform_legacy_resume(struct device *dev) 654static int platform_legacy_resume(struct device *dev)
669{ 655{
656 struct platform_driver *pdrv = to_platform_driver(dev->driver);
657 struct platform_device *pdev = to_platform_device(dev);
670 int ret = 0; 658 int ret = 0;
671 659
672 if (dev->driver && dev->driver->resume) 660 if (dev->driver && pdrv->resume)
673 ret = dev->driver->resume(dev); 661 ret = pdrv->resume(pdev);
674 662
675 return ret; 663 return ret;
676} 664}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3e4bc699bc0f..fae725458981 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -315,13 +315,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
315/*------------------------- Resume routines -------------------------*/ 315/*------------------------- Resume routines -------------------------*/
316 316
317/** 317/**
318 * resume_device_noirq - Power on one device (early resume). 318 * device_resume_noirq - Power on one device (early resume).
319 * @dev: Device. 319 * @dev: Device.
320 * @state: PM transition of the system being carried out. 320 * @state: PM transition of the system being carried out.
321 * 321 *
322 * Must be called with interrupts disabled. 322 * Must be called with interrupts disabled.
323 */ 323 */
324static int resume_device_noirq(struct device *dev, pm_message_t state) 324static int device_resume_noirq(struct device *dev, pm_message_t state)
325{ 325{
326 int error = 0; 326 int error = 0;
327 327
@@ -334,9 +334,6 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
334 if (dev->bus->pm) { 334 if (dev->bus->pm) {
335 pm_dev_dbg(dev, state, "EARLY "); 335 pm_dev_dbg(dev, state, "EARLY ");
336 error = pm_noirq_op(dev, dev->bus->pm, state); 336 error = pm_noirq_op(dev, dev->bus->pm, state);
337 } else if (dev->bus->resume_early) {
338 pm_dev_dbg(dev, state, "legacy EARLY ");
339 error = dev->bus->resume_early(dev);
340 } 337 }
341 End: 338 End:
342 TRACE_RESUME(error); 339 TRACE_RESUME(error);
@@ -344,16 +341,16 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
344} 341}
345 342
346/** 343/**
347 * dpm_power_up - Power on all regular (non-sysdev) devices. 344 * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
348 * @state: PM transition of the system being carried out. 345 * @state: PM transition of the system being carried out.
349 * 346 *
350 * Execute the appropriate "noirq resume" callback for all devices marked 347 * Call the "noirq" resume handlers for all devices marked as
351 * as DPM_OFF_IRQ. 348 * DPM_OFF_IRQ and enable device drivers to receive interrupts.
352 * 349 *
353 * Must be called under dpm_list_mtx. Device drivers should not receive 350 * Must be called under dpm_list_mtx. Device drivers should not receive
354 * interrupts while it's being executed. 351 * interrupts while it's being executed.
355 */ 352 */
356static void dpm_power_up(pm_message_t state) 353void dpm_resume_noirq(pm_message_t state)
357{ 354{
358 struct device *dev; 355 struct device *dev;
359 356
@@ -363,33 +360,21 @@ static void dpm_power_up(pm_message_t state)
363 int error; 360 int error;
364 361
365 dev->power.status = DPM_OFF; 362 dev->power.status = DPM_OFF;
366 error = resume_device_noirq(dev, state); 363 error = device_resume_noirq(dev, state);
367 if (error) 364 if (error)
368 pm_dev_err(dev, state, " early", error); 365 pm_dev_err(dev, state, " early", error);
369 } 366 }
370 mutex_unlock(&dpm_list_mtx); 367 mutex_unlock(&dpm_list_mtx);
371}
372
373/**
374 * device_power_up - Turn on all devices that need special attention.
375 * @state: PM transition of the system being carried out.
376 *
377 * Call the "early" resume handlers and enable device drivers to receive
378 * interrupts.
379 */
380void device_power_up(pm_message_t state)
381{
382 dpm_power_up(state);
383 resume_device_irqs(); 368 resume_device_irqs();
384} 369}
385EXPORT_SYMBOL_GPL(device_power_up); 370EXPORT_SYMBOL_GPL(dpm_resume_noirq);
386 371
387/** 372/**
388 * resume_device - Restore state for one device. 373 * device_resume - Restore state for one device.
389 * @dev: Device. 374 * @dev: Device.
390 * @state: PM transition of the system being carried out. 375 * @state: PM transition of the system being carried out.
391 */ 376 */
392static int resume_device(struct device *dev, pm_message_t state) 377static int device_resume(struct device *dev, pm_message_t state)
393{ 378{
394 int error = 0; 379 int error = 0;
395 380
@@ -414,9 +399,6 @@ static int resume_device(struct device *dev, pm_message_t state)
414 if (dev->type->pm) { 399 if (dev->type->pm) {
415 pm_dev_dbg(dev, state, "type "); 400 pm_dev_dbg(dev, state, "type ");
416 error = pm_op(dev, dev->type->pm, state); 401 error = pm_op(dev, dev->type->pm, state);
417 } else if (dev->type->resume) {
418 pm_dev_dbg(dev, state, "legacy type ");
419 error = dev->type->resume(dev);
420 } 402 }
421 if (error) 403 if (error)
422 goto End; 404 goto End;
@@ -462,7 +444,7 @@ static void dpm_resume(pm_message_t state)
462 dev->power.status = DPM_RESUMING; 444 dev->power.status = DPM_RESUMING;
463 mutex_unlock(&dpm_list_mtx); 445 mutex_unlock(&dpm_list_mtx);
464 446
465 error = resume_device(dev, state); 447 error = device_resume(dev, state);
466 448
467 mutex_lock(&dpm_list_mtx); 449 mutex_lock(&dpm_list_mtx);
468 if (error) 450 if (error)
@@ -480,11 +462,11 @@ static void dpm_resume(pm_message_t state)
480} 462}
481 463
482/** 464/**
483 * complete_device - Complete a PM transition for given device 465 * device_complete - Complete a PM transition for given device
484 * @dev: Device. 466 * @dev: Device.
485 * @state: PM transition of the system being carried out. 467 * @state: PM transition of the system being carried out.
486 */ 468 */
487static void complete_device(struct device *dev, pm_message_t state) 469static void device_complete(struct device *dev, pm_message_t state)
488{ 470{
489 down(&dev->sem); 471 down(&dev->sem);
490 472
@@ -527,7 +509,7 @@ static void dpm_complete(pm_message_t state)
527 dev->power.status = DPM_ON; 509 dev->power.status = DPM_ON;
528 mutex_unlock(&dpm_list_mtx); 510 mutex_unlock(&dpm_list_mtx);
529 511
530 complete_device(dev, state); 512 device_complete(dev, state);
531 513
532 mutex_lock(&dpm_list_mtx); 514 mutex_lock(&dpm_list_mtx);
533 } 515 }
@@ -540,19 +522,19 @@ static void dpm_complete(pm_message_t state)
540} 522}
541 523
542/** 524/**
543 * device_resume - Restore state of each device in system. 525 * dpm_resume_end - Restore state of each device in system.
544 * @state: PM transition of the system being carried out. 526 * @state: PM transition of the system being carried out.
545 * 527 *
546 * Resume all the devices, unlock them all, and allow new 528 * Resume all the devices, unlock them all, and allow new
547 * devices to be registered once again. 529 * devices to be registered once again.
548 */ 530 */
549void device_resume(pm_message_t state) 531void dpm_resume_end(pm_message_t state)
550{ 532{
551 might_sleep(); 533 might_sleep();
552 dpm_resume(state); 534 dpm_resume(state);
553 dpm_complete(state); 535 dpm_complete(state);
554} 536}
555EXPORT_SYMBOL_GPL(device_resume); 537EXPORT_SYMBOL_GPL(dpm_resume_end);
556 538
557 539
558/*------------------------- Suspend routines -------------------------*/ 540/*------------------------- Suspend routines -------------------------*/
@@ -577,13 +559,13 @@ static pm_message_t resume_event(pm_message_t sleep_state)
577} 559}
578 560
579/** 561/**
580 * suspend_device_noirq - Shut down one device (late suspend). 562 * device_suspend_noirq - Shut down one device (late suspend).
581 * @dev: Device. 563 * @dev: Device.
582 * @state: PM transition of the system being carried out. 564 * @state: PM transition of the system being carried out.
583 * 565 *
584 * This is called with interrupts off and only a single CPU running. 566 * This is called with interrupts off and only a single CPU running.
585 */ 567 */
586static int suspend_device_noirq(struct device *dev, pm_message_t state) 568static int device_suspend_noirq(struct device *dev, pm_message_t state)
587{ 569{
588 int error = 0; 570 int error = 0;
589 571
@@ -593,24 +575,20 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
593 if (dev->bus->pm) { 575 if (dev->bus->pm) {
594 pm_dev_dbg(dev, state, "LATE "); 576 pm_dev_dbg(dev, state, "LATE ");
595 error = pm_noirq_op(dev, dev->bus->pm, state); 577 error = pm_noirq_op(dev, dev->bus->pm, state);
596 } else if (dev->bus->suspend_late) {
597 pm_dev_dbg(dev, state, "legacy LATE ");
598 error = dev->bus->suspend_late(dev, state);
599 suspend_report_result(dev->bus->suspend_late, error);
600 } 578 }
601 return error; 579 return error;
602} 580}
603 581
604/** 582/**
605 * device_power_down - Shut down special devices. 583 * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
606 * @state: PM transition of the system being carried out. 584 * @state: PM transition of the system being carried out.
607 * 585 *
608 * Prevent device drivers from receiving interrupts and call the "late" 586 * Prevent device drivers from receiving interrupts and call the "noirq"
609 * suspend handlers. 587 * suspend handlers.
610 * 588 *
611 * Must be called under dpm_list_mtx. 589 * Must be called under dpm_list_mtx.
612 */ 590 */
613int device_power_down(pm_message_t state) 591int dpm_suspend_noirq(pm_message_t state)
614{ 592{
615 struct device *dev; 593 struct device *dev;
616 int error = 0; 594 int error = 0;
@@ -618,7 +596,7 @@ int device_power_down(pm_message_t state)
618 suspend_device_irqs(); 596 suspend_device_irqs();
619 mutex_lock(&dpm_list_mtx); 597 mutex_lock(&dpm_list_mtx);
620 list_for_each_entry_reverse(dev, &dpm_list, power.entry) { 598 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
621 error = suspend_device_noirq(dev, state); 599 error = device_suspend_noirq(dev, state);
622 if (error) { 600 if (error) {
623 pm_dev_err(dev, state, " late", error); 601 pm_dev_err(dev, state, " late", error);
624 break; 602 break;
@@ -627,17 +605,17 @@ int device_power_down(pm_message_t state)
627 } 605 }
628 mutex_unlock(&dpm_list_mtx); 606 mutex_unlock(&dpm_list_mtx);
629 if (error) 607 if (error)
630 device_power_up(resume_event(state)); 608 dpm_resume_noirq(resume_event(state));
631 return error; 609 return error;
632} 610}
633EXPORT_SYMBOL_GPL(device_power_down); 611EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
634 612
635/** 613/**
636 * suspend_device - Save state of one device. 614 * device_suspend - Save state of one device.
637 * @dev: Device. 615 * @dev: Device.
638 * @state: PM transition of the system being carried out. 616 * @state: PM transition of the system being carried out.
639 */ 617 */
640static int suspend_device(struct device *dev, pm_message_t state) 618static int device_suspend(struct device *dev, pm_message_t state)
641{ 619{
642 int error = 0; 620 int error = 0;
643 621
@@ -660,10 +638,6 @@ static int suspend_device(struct device *dev, pm_message_t state)
660 if (dev->type->pm) { 638 if (dev->type->pm) {
661 pm_dev_dbg(dev, state, "type "); 639 pm_dev_dbg(dev, state, "type ");
662 error = pm_op(dev, dev->type->pm, state); 640 error = pm_op(dev, dev->type->pm, state);
663 } else if (dev->type->suspend) {
664 pm_dev_dbg(dev, state, "legacy type ");
665 error = dev->type->suspend(dev, state);
666 suspend_report_result(dev->type->suspend, error);
667 } 641 }
668 if (error) 642 if (error)
669 goto End; 643 goto End;
@@ -704,7 +678,7 @@ static int dpm_suspend(pm_message_t state)
704 get_device(dev); 678 get_device(dev);
705 mutex_unlock(&dpm_list_mtx); 679 mutex_unlock(&dpm_list_mtx);
706 680
707 error = suspend_device(dev, state); 681 error = device_suspend(dev, state);
708 682
709 mutex_lock(&dpm_list_mtx); 683 mutex_lock(&dpm_list_mtx);
710 if (error) { 684 if (error) {
@@ -723,11 +697,11 @@ static int dpm_suspend(pm_message_t state)
723} 697}
724 698
725/** 699/**
726 * prepare_device - Execute the ->prepare() callback(s) for given device. 700 * device_prepare - Execute the ->prepare() callback(s) for given device.
727 * @dev: Device. 701 * @dev: Device.
728 * @state: PM transition of the system being carried out. 702 * @state: PM transition of the system being carried out.
729 */ 703 */
730static int prepare_device(struct device *dev, pm_message_t state) 704static int device_prepare(struct device *dev, pm_message_t state)
731{ 705{
732 int error = 0; 706 int error = 0;
733 707
@@ -781,7 +755,7 @@ static int dpm_prepare(pm_message_t state)
781 dev->power.status = DPM_PREPARING; 755 dev->power.status = DPM_PREPARING;
782 mutex_unlock(&dpm_list_mtx); 756 mutex_unlock(&dpm_list_mtx);
783 757
784 error = prepare_device(dev, state); 758 error = device_prepare(dev, state);
785 759
786 mutex_lock(&dpm_list_mtx); 760 mutex_lock(&dpm_list_mtx);
787 if (error) { 761 if (error) {
@@ -807,12 +781,12 @@ static int dpm_prepare(pm_message_t state)
807} 781}
808 782
809/** 783/**
810 * device_suspend - Save state and stop all devices in system. 784 * dpm_suspend_start - Save state and stop all devices in system.
811 * @state: PM transition of the system being carried out. 785 * @state: PM transition of the system being carried out.
812 * 786 *
813 * Prepare and suspend all devices. 787 * Prepare and suspend all devices.
814 */ 788 */
815int device_suspend(pm_message_t state) 789int dpm_suspend_start(pm_message_t state)
816{ 790{
817 int error; 791 int error;
818 792
@@ -822,7 +796,7 @@ int device_suspend(pm_message_t state)
822 error = dpm_suspend(state); 796 error = dpm_suspend(state);
823 return error; 797 return error;
824} 798}
825EXPORT_SYMBOL_GPL(device_suspend); 799EXPORT_SYMBOL_GPL(dpm_suspend_start);
826 800
827void __suspend_report_result(const char *function, void *fn, int ret) 801void __suspend_report_result(const char *function, void *fn, int ret)
828{ 802{
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3236b434b964..9742a78c9fe4 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -343,11 +343,15 @@ static void __sysdev_resume(struct sys_device *dev)
343 /* First, call the class-specific one */ 343 /* First, call the class-specific one */
344 if (cls->resume) 344 if (cls->resume)
345 cls->resume(dev); 345 cls->resume(dev);
346 WARN_ONCE(!irqs_disabled(),
347 "Interrupts enabled after %pF\n", cls->resume);
346 348
347 /* Call auxillary drivers next. */ 349 /* Call auxillary drivers next. */
348 list_for_each_entry(drv, &cls->drivers, entry) { 350 list_for_each_entry(drv, &cls->drivers, entry) {
349 if (drv->resume) 351 if (drv->resume)
350 drv->resume(dev); 352 drv->resume(dev);
353 WARN_ONCE(!irqs_disabled(),
354 "Interrupts enabled after %pF\n", drv->resume);
351 } 355 }
352} 356}
353 357
@@ -377,6 +381,9 @@ int sysdev_suspend(pm_message_t state)
377 if (ret) 381 if (ret)
378 return ret; 382 return ret;
379 383
384 WARN_ONCE(!irqs_disabled(),
385 "Interrupts enabled while suspending system devices\n");
386
380 pr_debug("Suspending System Devices\n"); 387 pr_debug("Suspending System Devices\n");
381 388
382 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { 389 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
@@ -393,6 +400,9 @@ int sysdev_suspend(pm_message_t state)
393 if (ret) 400 if (ret)
394 goto aux_driver; 401 goto aux_driver;
395 } 402 }
403 WARN_ONCE(!irqs_disabled(),
404 "Interrupts enabled after %pF\n",
405 drv->suspend);
396 } 406 }
397 407
398 /* Now call the generic one */ 408 /* Now call the generic one */
@@ -400,6 +410,9 @@ int sysdev_suspend(pm_message_t state)
400 ret = cls->suspend(sysdev, state); 410 ret = cls->suspend(sysdev, state);
401 if (ret) 411 if (ret)
402 goto cls_driver; 412 goto cls_driver;
413 WARN_ONCE(!irqs_disabled(),
414 "Interrupts enabled after %pF\n",
415 cls->suspend);
403 } 416 }
404 } 417 }
405 } 418 }
@@ -452,6 +465,9 @@ int sysdev_resume(void)
452{ 465{
453 struct sysdev_class *cls; 466 struct sysdev_class *cls;
454 467
468 WARN_ONCE(!irqs_disabled(),
469 "Interrupts enabled while resuming system devices\n");
470
455 pr_debug("Resuming System Devices\n"); 471 pr_debug("Resuming System Devices\n");
456 472
457 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { 473 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f42fa50d3550..ac5e05a98b2f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -438,7 +438,7 @@ source "drivers/s390/block/Kconfig"
438 438
439config XILINX_SYSACE 439config XILINX_SYSACE
440 tristate "Xilinx SystemACE support" 440 tristate "Xilinx SystemACE support"
441 depends on 4xx 441 depends on 4xx || MICROBLAZE
442 help 442 help
443 Include support for the Xilinx SystemACE CompactFlash interface 443 Include support for the Xilinx SystemACE CompactFlash interface
444 444
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0facaa55cf4..43db3ea15b54 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -254,7 +254,7 @@ static int index_to_minor(int index)
254 return index << PART_BITS; 254 return index << PART_BITS;
255} 255}
256 256
257static int virtblk_probe(struct virtio_device *vdev) 257static int __devinit virtblk_probe(struct virtio_device *vdev)
258{ 258{
259 struct virtio_blk *vblk; 259 struct virtio_blk *vblk;
260 int err; 260 int err;
@@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev)
288 sg_init_table(vblk->sg, vblk->sg_elems); 288 sg_init_table(vblk->sg, vblk->sg_elems);
289 289
290 /* We expect one virtqueue, for output. */ 290 /* We expect one virtqueue, for output. */
291 vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); 291 vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
292 if (IS_ERR(vblk->vq)) { 292 if (IS_ERR(vblk->vq)) {
293 err = PTR_ERR(vblk->vq); 293 err = PTR_ERR(vblk->vq);
294 goto out_free_vblk; 294 goto out_free_vblk;
@@ -388,14 +388,14 @@ out_put_disk:
388out_mempool: 388out_mempool:
389 mempool_destroy(vblk->pool); 389 mempool_destroy(vblk->pool);
390out_free_vq: 390out_free_vq:
391 vdev->config->del_vq(vblk->vq); 391 vdev->config->del_vqs(vdev);
392out_free_vblk: 392out_free_vblk:
393 kfree(vblk); 393 kfree(vblk);
394out: 394out:
395 return err; 395 return err;
396} 396}
397 397
398static void virtblk_remove(struct virtio_device *vdev) 398static void __devexit virtblk_remove(struct virtio_device *vdev)
399{ 399{
400 struct virtio_blk *vblk = vdev->priv; 400 struct virtio_blk *vblk = vdev->priv;
401 401
@@ -409,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev)
409 blk_cleanup_queue(vblk->disk->queue); 409 blk_cleanup_queue(vblk->disk->queue);
410 put_disk(vblk->disk); 410 put_disk(vblk->disk);
411 mempool_destroy(vblk->pool); 411 mempool_destroy(vblk->pool);
412 vdev->config->del_vq(vblk->vq); 412 vdev->config->del_vqs(vdev);
413 kfree(vblk); 413 kfree(vblk);
414} 414}
415 415
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 02ecfd5fa61c..b1e9652c0d9d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -906,7 +906,7 @@ config DTLK
906 906
907config XILINX_HWICAP 907config XILINX_HWICAP
908 tristate "Xilinx HWICAP Support" 908 tristate "Xilinx HWICAP Support"
909 depends on XILINX_VIRTEX 909 depends on XILINX_VIRTEX || MICROBLAZE
910 help 910 help
911 This option enables support for Xilinx Internal Configuration 911 This option enables support for Xilinx Internal Configuration
912 Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex 912 Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3686912427ba..7a748fa0dfce 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,10 @@
46#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 46#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
47#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 47#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
48#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 48#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
52#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
49 53
50/* cover 915 and 945 variants */ 54/* cover 915 and 945 variants */
51#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 55#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -75,7 +79,9 @@
75 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 79 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
76 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ 80 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
77 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ 81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
78 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB) 82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
79 85
80extern int agp_memory_reserved; 86extern int agp_memory_reserved;
81 87
@@ -1211,6 +1217,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1211 case PCI_DEVICE_ID_INTEL_Q45_HB: 1217 case PCI_DEVICE_ID_INTEL_Q45_HB:
1212 case PCI_DEVICE_ID_INTEL_G45_HB: 1218 case PCI_DEVICE_ID_INTEL_G45_HB:
1213 case PCI_DEVICE_ID_INTEL_G41_HB: 1219 case PCI_DEVICE_ID_INTEL_G41_HB:
1220 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
1221 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
1214 *gtt_offset = *gtt_size = MB(2); 1222 *gtt_offset = *gtt_size = MB(2);
1215 break; 1223 break;
1216 default: 1224 default:
@@ -2186,6 +2194,10 @@ static const struct intel_driver_description {
2186 "G45/G43", NULL, &intel_i965_driver }, 2194 "G45/G43", NULL, &intel_i965_driver },
2187 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 2195 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2188 "G41", NULL, &intel_i965_driver }, 2196 "G41", NULL, &intel_i965_driver },
2197 { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
2198 "IGDNG/D", NULL, &intel_i965_driver },
2199 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2200 "IGDNG/M", NULL, &intel_i965_driver },
2189 { 0, 0, 0, NULL, NULL, NULL } 2201 { 0, 0, 0, NULL, NULL, NULL }
2190}; 2202};
2191 2203
@@ -2387,6 +2399,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2387 ID(PCI_DEVICE_ID_INTEL_Q45_HB), 2399 ID(PCI_DEVICE_ID_INTEL_Q45_HB),
2388 ID(PCI_DEVICE_ID_INTEL_G45_HB), 2400 ID(PCI_DEVICE_ID_INTEL_G45_HB),
2389 ID(PCI_DEVICE_ID_INTEL_G41_HB), 2401 ID(PCI_DEVICE_ID_INTEL_G41_HB),
2402 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
2403 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
2390 { } 2404 { }
2391}; 2405};
2392 2406
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 86e83f883139..32216b623248 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data);
35 35
36static void random_recv_done(struct virtqueue *vq) 36static void random_recv_done(struct virtqueue *vq)
37{ 37{
38 int len; 38 unsigned int len;
39 39
40 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ 40 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
41 if (!vq->vq_ops->get_buf(vq, &len)) 41 if (!vq->vq_ops->get_buf(vq, &len))
42 return; 42 return;
43 43
44 data_left = len / sizeof(random_data[0]); 44 data_left += len;
45 complete(&have_data); 45 complete(&have_data);
46} 46}
47 47
@@ -49,7 +49,7 @@ static void register_buffer(void)
49{ 49{
50 struct scatterlist sg; 50 struct scatterlist sg;
51 51
52 sg_init_one(&sg, random_data, RANDOM_DATA_SIZE); 52 sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
53 /* There should always be room for one buffer. */ 53 /* There should always be room for one buffer. */
54 if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) 54 if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0)
55 BUG(); 55 BUG();
@@ -59,24 +59,32 @@ static void register_buffer(void)
59/* At least we don't udelay() in a loop like some other drivers. */ 59/* At least we don't udelay() in a loop like some other drivers. */
60static int virtio_data_present(struct hwrng *rng, int wait) 60static int virtio_data_present(struct hwrng *rng, int wait)
61{ 61{
62 if (data_left) 62 if (data_left >= sizeof(u32))
63 return 1; 63 return 1;
64 64
65again:
65 if (!wait) 66 if (!wait)
66 return 0; 67 return 0;
67 68
68 wait_for_completion(&have_data); 69 wait_for_completion(&have_data);
70
71 /* Not enough? Re-register. */
72 if (unlikely(data_left < sizeof(u32))) {
73 register_buffer();
74 goto again;
75 }
76
69 return 1; 77 return 1;
70} 78}
71 79
72/* virtio_data_present() must have succeeded before this is called. */ 80/* virtio_data_present() must have succeeded before this is called. */
73static int virtio_data_read(struct hwrng *rng, u32 *data) 81static int virtio_data_read(struct hwrng *rng, u32 *data)
74{ 82{
75 BUG_ON(!data_left); 83 BUG_ON(data_left < sizeof(u32));
76 84 data_left -= sizeof(u32);
77 *data = random_data[--data_left]; 85 *data = random_data[data_left / 4];
78 86
79 if (!data_left) { 87 if (data_left < sizeof(u32)) {
80 init_completion(&have_data); 88 init_completion(&have_data);
81 register_buffer(); 89 register_buffer();
82 } 90 }
@@ -94,13 +102,13 @@ static int virtrng_probe(struct virtio_device *vdev)
94 int err; 102 int err;
95 103
96 /* We expect a single virtqueue. */ 104 /* We expect a single virtqueue. */
97 vq = vdev->config->find_vq(vdev, 0, random_recv_done); 105 vq = virtio_find_single_vq(vdev, random_recv_done, "input");
98 if (IS_ERR(vq)) 106 if (IS_ERR(vq))
99 return PTR_ERR(vq); 107 return PTR_ERR(vq);
100 108
101 err = hwrng_register(&virtio_hwrng); 109 err = hwrng_register(&virtio_hwrng);
102 if (err) { 110 if (err) {
103 vdev->config->del_vq(vq); 111 vdev->config->del_vqs(vdev);
104 return err; 112 return err;
105 } 113 }
106 114
@@ -112,7 +120,7 @@ static void virtrng_remove(struct virtio_device *vdev)
112{ 120{
113 vdev->config->reset(vdev); 121 vdev->config->reset(vdev);
114 hwrng_unregister(&virtio_hwrng); 122 hwrng_unregister(&virtio_hwrng);
115 vdev->config->del_vq(vq); 123 vdev->config->del_vqs(vdev);
116} 124}
117 125
118static struct virtio_device_id id_table[] = { 126static struct virtio_device_id id_table[] = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ff6f5a4b58fb..c74dacfa6795 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq)
188 * Finally we put our input buffer in the input queue, ready to receive. */ 188 * Finally we put our input buffer in the input queue, ready to receive. */
189static int __devinit virtcons_probe(struct virtio_device *dev) 189static int __devinit virtcons_probe(struct virtio_device *dev)
190{ 190{
191 vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
192 const char *names[] = { "input", "output" };
193 struct virtqueue *vqs[2];
191 int err; 194 int err;
192 195
193 vdev = dev; 196 vdev = dev;
@@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
199 goto fail; 202 goto fail;
200 } 203 }
201 204
202 /* Find the input queue. */ 205 /* Find the queues. */
203 /* FIXME: This is why we want to wean off hvc: we do nothing 206 /* FIXME: This is why we want to wean off hvc: we do nothing
204 * when input comes in. */ 207 * when input comes in. */
205 in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); 208 err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
206 if (IS_ERR(in_vq)) { 209 if (err)
207 err = PTR_ERR(in_vq);
208 goto free; 210 goto free;
209 }
210 211
211 out_vq = vdev->config->find_vq(vdev, 1, NULL); 212 in_vq = vqs[0];
212 if (IS_ERR(out_vq)) { 213 out_vq = vqs[1];
213 err = PTR_ERR(out_vq);
214 goto free_in_vq;
215 }
216 214
217 /* Start using the new console output. */ 215 /* Start using the new console output. */
218 virtio_cons.get_chars = get_chars; 216 virtio_cons.get_chars = get_chars;
@@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
233 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); 231 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
234 if (IS_ERR(hvc)) { 232 if (IS_ERR(hvc)) {
235 err = PTR_ERR(hvc); 233 err = PTR_ERR(hvc);
236 goto free_out_vq; 234 goto free_vqs;
237 } 235 }
238 236
239 /* Register the input buffer the first time. */ 237 /* Register the input buffer the first time. */
240 add_inbuf(); 238 add_inbuf();
241 return 0; 239 return 0;
242 240
243free_out_vq: 241free_vqs:
244 vdev->config->del_vq(out_vq); 242 vdev->config->del_vqs(vdev);
245free_in_vq:
246 vdev->config->del_vq(in_vq);
247free: 243free:
248 kfree(inbuf); 244 kfree(inbuf);
249fail: 245fail:
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index de9ebee8657b..c796a86ab7f3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -103,7 +103,6 @@
103#include <linux/io.h> 103#include <linux/io.h>
104#include <asm/system.h> 104#include <asm/system.h>
105#include <linux/uaccess.h> 105#include <linux/uaccess.h>
106#include <linux/kmemleak.h>
107 106
108#define MAX_NR_CON_DRIVER 16 107#define MAX_NR_CON_DRIVER 16
109 108
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edb02530e461..11f373971fa5 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -69,7 +69,7 @@ comment "Memory mapped GPIO expanders:"
69 69
70config GPIO_XILINX 70config GPIO_XILINX
71 bool "Xilinx GPIO support" 71 bool "Xilinx GPIO support"
72 depends on PPC_OF 72 depends on PPC_OF || MICROBLAZE
73 help 73 help
74 Say yes here to support the Xilinx FPGA GPIO device 74 Say yes here to support the Xilinx FPGA GPIO device
75 75
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 0411d912d82a..80a257554b30 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -371,7 +371,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
371 list->user_token = list->hash.key << PAGE_SHIFT; 371 list->user_token = list->hash.key << PAGE_SHIFT;
372 mutex_unlock(&dev->struct_mutex); 372 mutex_unlock(&dev->struct_mutex);
373 373
374 list->master = dev->primary->master; 374 if (!(map->flags & _DRM_DRIVER))
375 list->master = dev->primary->master;
375 *maplist = list; 376 *maplist = list;
376 return 0; 377 return 0;
377 } 378 }
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6f6b26479d82..801a0d0e0810 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -589,85 +589,13 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
589} 589}
590EXPORT_SYMBOL(drm_do_probe_ddc_edid); 590EXPORT_SYMBOL(drm_do_probe_ddc_edid);
591 591
592/**
593 * Get EDID information.
594 *
595 * \param adapter : i2c device adaptor.
596 * \param buf : EDID data buffer to be filled
597 * \param len : EDID data buffer length
598 * \return 0 on success or -1 on failure.
599 *
600 * Initialize DDC, then fetch EDID information
601 * by calling drm_do_probe_ddc_edid function.
602 */
603static int drm_ddc_read(struct i2c_adapter *adapter,
604 unsigned char *buf, int len)
605{
606 struct i2c_algo_bit_data *algo_data = adapter->algo_data;
607 int i, j;
608 int ret = -1;
609
610 algo_data->setscl(algo_data->data, 1);
611
612 for (i = 0; i < 1; i++) {
613 /* For some old monitors we need the
614 * following process to initialize/stop DDC
615 */
616 algo_data->setsda(algo_data->data, 1);
617 msleep(13);
618
619 algo_data->setscl(algo_data->data, 1);
620 for (j = 0; j < 5; j++) {
621 msleep(10);
622 if (algo_data->getscl(algo_data->data))
623 break;
624 }
625 if (j == 5)
626 continue;
627
628 algo_data->setsda(algo_data->data, 0);
629 msleep(15);
630 algo_data->setscl(algo_data->data, 0);
631 msleep(15);
632 algo_data->setsda(algo_data->data, 1);
633 msleep(15);
634
635 /* Do the real work */
636 ret = drm_do_probe_ddc_edid(adapter, buf, len);
637 algo_data->setsda(algo_data->data, 0);
638 algo_data->setscl(algo_data->data, 0);
639 msleep(15);
640
641 algo_data->setscl(algo_data->data, 1);
642 for (j = 0; j < 10; j++) {
643 msleep(10);
644 if (algo_data->getscl(algo_data->data))
645 break;
646 }
647
648 algo_data->setsda(algo_data->data, 1);
649 msleep(15);
650 algo_data->setscl(algo_data->data, 0);
651 algo_data->setsda(algo_data->data, 0);
652 if (ret == 0)
653 break;
654 }
655 /* Release the DDC lines when done or the Apple Cinema HD display
656 * will switch off
657 */
658 algo_data->setsda(algo_data->data, 1);
659 algo_data->setscl(algo_data->data, 1);
660
661 return ret;
662}
663
664static int drm_ddc_read_edid(struct drm_connector *connector, 592static int drm_ddc_read_edid(struct drm_connector *connector,
665 struct i2c_adapter *adapter, 593 struct i2c_adapter *adapter,
666 char *buf, int len) 594 char *buf, int len)
667{ 595{
668 int ret; 596 int ret;
669 597
670 ret = drm_ddc_read(adapter, buf, len); 598 ret = drm_do_probe_ddc_edid(adapter, buf, len);
671 if (ret != 0) { 599 if (ret != 0) {
672 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", 600 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
673 drm_get_connector_name(connector)); 601 drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4984aa89cf3d..ec43005100d9 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -133,7 +133,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
133 133
134 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 134 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
135 135
136 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); 136 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 137
138 obj->dev = dev; 138 obj->dev = dev;
139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index af539f7d87dd..ac35145c3e20 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -62,6 +62,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
62 } 62 }
63 return 0; 63 return 0;
64} 64}
65EXPORT_SYMBOL(drm_ht_create);
65 66
66void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) 67void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
67{ 68{
@@ -156,6 +157,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
156 } 157 }
157 return 0; 158 return 0;
158} 159}
160EXPORT_SYMBOL(drm_ht_just_insert_please);
159 161
160int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, 162int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
161 struct drm_hash_item **item) 163 struct drm_hash_item **item)
@@ -169,6 +171,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
169 *item = hlist_entry(list, struct drm_hash_item, head); 171 *item = hlist_entry(list, struct drm_hash_item, head);
170 return 0; 172 return 0;
171} 173}
174EXPORT_SYMBOL(drm_ht_find_item);
172 175
173int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) 176int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
174{ 177{
@@ -202,3 +205,4 @@ void drm_ht_remove(struct drm_open_hash *ht)
202 ht->table = NULL; 205 ht->table = NULL;
203 } 206 }
204} 207}
208EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbba..7819fd930a51 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
42 */ 42 */
43 43
44#include "drmP.h" 44#include "drmP.h"
45#include "drm_mm.h"
45#include <linux/slab.h> 46#include <linux/slab.h>
46 47
48#define MM_UNUSED_TARGET 4
49
47unsigned long drm_mm_tail_space(struct drm_mm *mm) 50unsigned long drm_mm_tail_space(struct drm_mm *mm)
48{ 51{
49 struct list_head *tail_node; 52 struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
74 return 0; 77 return 0;
75} 78}
76 79
80static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
81{
82 struct drm_mm_node *child;
83
84 if (atomic)
85 child = kmalloc(sizeof(*child), GFP_ATOMIC);
86 else
87 child = kmalloc(sizeof(*child), GFP_KERNEL);
88
89 if (unlikely(child == NULL)) {
90 spin_lock(&mm->unused_lock);
91 if (list_empty(&mm->unused_nodes))
92 child = NULL;
93 else {
94 child =
95 list_entry(mm->unused_nodes.next,
96 struct drm_mm_node, fl_entry);
97 list_del(&child->fl_entry);
98 --mm->num_unused;
99 }
100 spin_unlock(&mm->unused_lock);
101 }
102 return child;
103}
104
105int drm_mm_pre_get(struct drm_mm *mm)
106{
107 struct drm_mm_node *node;
108
109 spin_lock(&mm->unused_lock);
110 while (mm->num_unused < MM_UNUSED_TARGET) {
111 spin_unlock(&mm->unused_lock);
112 node = kmalloc(sizeof(*node), GFP_KERNEL);
113 spin_lock(&mm->unused_lock);
114
115 if (unlikely(node == NULL)) {
116 int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
117 spin_unlock(&mm->unused_lock);
118 return ret;
119 }
120 ++mm->num_unused;
121 list_add_tail(&node->fl_entry, &mm->unused_nodes);
122 }
123 spin_unlock(&mm->unused_lock);
124 return 0;
125}
126EXPORT_SYMBOL(drm_mm_pre_get);
77 127
78static int drm_mm_create_tail_node(struct drm_mm *mm, 128static int drm_mm_create_tail_node(struct drm_mm *mm,
79 unsigned long start, 129 unsigned long start,
80 unsigned long size) 130 unsigned long size, int atomic)
81{ 131{
82 struct drm_mm_node *child; 132 struct drm_mm_node *child;
83 133
84 child = (struct drm_mm_node *) 134 child = drm_mm_kmalloc(mm, atomic);
85 drm_alloc(sizeof(*child), DRM_MEM_MM); 135 if (unlikely(child == NULL))
86 if (!child)
87 return -ENOMEM; 136 return -ENOMEM;
88 137
89 child->free = 1; 138 child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
97 return 0; 146 return 0;
98} 147}
99 148
100 149int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
101int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
102{ 150{
103 struct list_head *tail_node; 151 struct list_head *tail_node;
104 struct drm_mm_node *entry; 152 struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
106 tail_node = mm->ml_entry.prev; 154 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, struct drm_mm_node, ml_entry); 155 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
108 if (!entry->free) { 156 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size); 157 return drm_mm_create_tail_node(mm, entry->start + entry->size,
158 size, atomic);
110 } 159 }
111 entry->size += size; 160 entry->size += size;
112 return 0; 161 return 0;
113} 162}
114 163
115static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, 164static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
116 unsigned long size) 165 unsigned long size,
166 int atomic)
117{ 167{
118 struct drm_mm_node *child; 168 struct drm_mm_node *child;
119 169
120 child = (struct drm_mm_node *) 170 child = drm_mm_kmalloc(parent->mm, atomic);
121 drm_alloc(sizeof(*child), DRM_MEM_MM); 171 if (unlikely(child == NULL))
122 if (!child)
123 return NULL; 172 return NULL;
124 173
125 INIT_LIST_HEAD(&child->fl_entry); 174 INIT_LIST_HEAD(&child->fl_entry);
@@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
151 tmp = parent->start % alignment; 200 tmp = parent->start % alignment;
152 201
153 if (tmp) { 202 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); 203 align_splitoff =
155 if (!align_splitoff) 204 drm_mm_split_at_start(parent, alignment - tmp, 0);
205 if (unlikely(align_splitoff == NULL))
156 return NULL; 206 return NULL;
157 } 207 }
158 208
@@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
161 parent->free = 0; 211 parent->free = 0;
162 return parent; 212 return parent;
163 } else { 213 } else {
164 child = drm_mm_split_at_start(parent, size); 214 child = drm_mm_split_at_start(parent, size, 0);
165 } 215 }
166 216
167 if (align_splitoff) 217 if (align_splitoff)
@@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
169 219
170 return child; 220 return child;
171} 221}
222
172EXPORT_SYMBOL(drm_mm_get_block); 223EXPORT_SYMBOL(drm_mm_get_block);
173 224
225struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
226 unsigned long size,
227 unsigned alignment)
228{
229
230 struct drm_mm_node *align_splitoff = NULL;
231 struct drm_mm_node *child;
232 unsigned tmp = 0;
233
234 if (alignment)
235 tmp = parent->start % alignment;
236
237 if (tmp) {
238 align_splitoff =
239 drm_mm_split_at_start(parent, alignment - tmp, 1);
240 if (unlikely(align_splitoff == NULL))
241 return NULL;
242 }
243
244 if (parent->size == size) {
245 list_del_init(&parent->fl_entry);
246 parent->free = 0;
247 return parent;
248 } else {
249 child = drm_mm_split_at_start(parent, size, 1);
250 }
251
252 if (align_splitoff)
253 drm_mm_put_block(align_splitoff);
254
255 return child;
256}
257EXPORT_SYMBOL(drm_mm_get_block_atomic);
258
174/* 259/*
175 * Put a block. Merge with the previous and / or next block if they are free. 260 * Put a block. Merge with the previous and / or next block if they are free.
176 * Otherwise add to the free stack. 261 * Otherwise add to the free stack.
177 */ 262 */
178 263
179void drm_mm_put_block(struct drm_mm_node * cur) 264void drm_mm_put_block(struct drm_mm_node *cur)
180{ 265{
181 266
182 struct drm_mm *mm = cur->mm; 267 struct drm_mm *mm = cur->mm;
@@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
188 int merged = 0; 273 int merged = 0;
189 274
190 if (cur_head->prev != root_head) { 275 if (cur_head->prev != root_head) {
191 prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); 276 prev_node =
277 list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
192 if (prev_node->free) { 278 if (prev_node->free) {
193 prev_node->size += cur->size; 279 prev_node->size += cur->size;
194 merged = 1; 280 merged = 1;
195 } 281 }
196 } 282 }
197 if (cur_head->next != root_head) { 283 if (cur_head->next != root_head) {
198 next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); 284 next_node =
285 list_entry(cur_head->next, struct drm_mm_node, ml_entry);
199 if (next_node->free) { 286 if (next_node->free) {
200 if (merged) { 287 if (merged) {
201 prev_node->size += next_node->size; 288 prev_node->size += next_node->size;
202 list_del(&next_node->ml_entry); 289 list_del(&next_node->ml_entry);
203 list_del(&next_node->fl_entry); 290 list_del(&next_node->fl_entry);
204 drm_free(next_node, sizeof(*next_node), 291 if (mm->num_unused < MM_UNUSED_TARGET) {
205 DRM_MEM_MM); 292 list_add(&next_node->fl_entry,
293 &mm->unused_nodes);
294 ++mm->num_unused;
295 } else
296 kfree(next_node);
206 } else { 297 } else {
207 next_node->size += cur->size; 298 next_node->size += cur->size;
208 next_node->start = cur->start; 299 next_node->start = cur->start;
@@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
215 list_add(&cur->fl_entry, &mm->fl_entry); 306 list_add(&cur->fl_entry, &mm->fl_entry);
216 } else { 307 } else {
217 list_del(&cur->ml_entry); 308 list_del(&cur->ml_entry);
218 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 309 if (mm->num_unused < MM_UNUSED_TARGET) {
310 list_add(&cur->fl_entry, &mm->unused_nodes);
311 ++mm->num_unused;
312 } else
313 kfree(cur);
219 } 314 }
220} 315}
316
221EXPORT_SYMBOL(drm_mm_put_block); 317EXPORT_SYMBOL(drm_mm_put_block);
222 318
223struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, 319struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
224 unsigned long size, 320 unsigned long size,
225 unsigned alignment, int best_match) 321 unsigned alignment, int best_match)
226{ 322{
227 struct list_head *list; 323 struct list_head *list;
228 const struct list_head *free_stack = &mm->fl_entry; 324 const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
247 wasted += alignment - tmp; 343 wasted += alignment - tmp;
248 } 344 }
249 345
250
251 if (entry->size >= size + wasted) { 346 if (entry->size >= size + wasted) {
252 if (!best_match) 347 if (!best_match)
253 return entry; 348 return entry;
@@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
260 355
261 return best; 356 return best;
262} 357}
358EXPORT_SYMBOL(drm_mm_search_free);
263 359
264int drm_mm_clean(struct drm_mm * mm) 360int drm_mm_clean(struct drm_mm * mm)
265{ 361{
@@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm)
267 363
268 return (head->next->next == head); 364 return (head->next->next == head);
269} 365}
270EXPORT_SYMBOL(drm_mm_search_free); 366EXPORT_SYMBOL(drm_mm_clean);
271 367
272int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 368int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
273{ 369{
274 INIT_LIST_HEAD(&mm->ml_entry); 370 INIT_LIST_HEAD(&mm->ml_entry);
275 INIT_LIST_HEAD(&mm->fl_entry); 371 INIT_LIST_HEAD(&mm->fl_entry);
372 INIT_LIST_HEAD(&mm->unused_nodes);
373 mm->num_unused = 0;
374 spin_lock_init(&mm->unused_lock);
276 375
277 return drm_mm_create_tail_node(mm, start, size); 376 return drm_mm_create_tail_node(mm, start, size, 0);
278} 377}
279EXPORT_SYMBOL(drm_mm_init); 378EXPORT_SYMBOL(drm_mm_init);
280 379
@@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm)
282{ 381{
283 struct list_head *bnode = mm->fl_entry.next; 382 struct list_head *bnode = mm->fl_entry.next;
284 struct drm_mm_node *entry; 383 struct drm_mm_node *entry;
384 struct drm_mm_node *next;
285 385
286 entry = list_entry(bnode, struct drm_mm_node, fl_entry); 386 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
287 387
@@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm)
293 393
294 list_del(&entry->fl_entry); 394 list_del(&entry->fl_entry);
295 list_del(&entry->ml_entry); 395 list_del(&entry->ml_entry);
396 kfree(entry);
397
398 spin_lock(&mm->unused_lock);
399 list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
400 list_del(&entry->fl_entry);
401 kfree(entry);
402 --mm->num_unused;
403 }
404 spin_unlock(&mm->unused_lock);
296 405
297 drm_free(entry, sizeof(*entry), DRM_MEM_MM); 406 BUG_ON(mm->num_unused != 0);
298} 407}
299EXPORT_SYMBOL(drm_mm_takedown); 408EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c9b80fdd4630..54f492a488a9 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -38,6 +38,7 @@
38#include "drm.h" 38#include "drm.h"
39#include "drm_crtc.h" 39#include "drm_crtc.h"
40 40
41#define DRM_MODESET_DEBUG "drm_mode"
41/** 42/**
42 * drm_mode_debug_printmodeline - debug print a mode 43 * drm_mode_debug_printmodeline - debug print a mode
43 * @dev: DRM device 44 * @dev: DRM device
@@ -50,12 +51,13 @@
50 */ 51 */
51void drm_mode_debug_printmodeline(struct drm_display_mode *mode) 52void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
52{ 53{
53 DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", 54 DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
54 mode->base.id, mode->name, mode->vrefresh, mode->clock, 55 "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
55 mode->hdisplay, mode->hsync_start, 56 mode->base.id, mode->name, mode->vrefresh, mode->clock,
56 mode->hsync_end, mode->htotal, 57 mode->hdisplay, mode->hsync_start,
57 mode->vdisplay, mode->vsync_start, 58 mode->hsync_end, mode->htotal,
58 mode->vsync_end, mode->vtotal, mode->type, mode->flags); 59 mode->vdisplay, mode->vsync_start,
60 mode->vsync_end, mode->vtotal, mode->type, mode->flags);
59} 61}
60EXPORT_SYMBOL(drm_mode_debug_printmodeline); 62EXPORT_SYMBOL(drm_mode_debug_printmodeline);
61 63
@@ -401,7 +403,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
401 list_del(&mode->head); 403 list_del(&mode->head);
402 if (verbose) { 404 if (verbose) {
403 drm_mode_debug_printmodeline(mode); 405 drm_mode_debug_printmodeline(mode);
404 DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status); 406 DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
407 "Not using %s mode %d\n",
408 mode->name, mode->status);
405 } 409 }
406 drm_mode_destroy(dev, mode); 410 drm_mode_destroy(dev, mode);
407 } 411 }
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b9631e3a1ea6..89050684fe0d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -51,7 +51,22 @@ struct idr drm_minors_idr;
51struct class *drm_class; 51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
53struct dentry *drm_debugfs_root; 53struct dentry *drm_debugfs_root;
54 54void drm_ut_debug_printk(unsigned int request_level,
55 const char *prefix,
56 const char *function_name,
57 const char *format, ...)
58{
59 va_list args;
60
61 if (drm_debug & request_level) {
62 if (function_name)
63 printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
64 va_start(args, format);
65 vprintk(format, args);
66 va_end(args);
67 }
68}
69EXPORT_SYMBOL(drm_ut_debug_printk);
55static int drm_minor_get_id(struct drm_device *dev, int type) 70static int drm_minor_get_id(struct drm_device *dev, int type)
56{ 71{
57 int new_id; 72 int new_id;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0ccb63ee50ee..1a60626f6803 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,8 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35 35
36#define I915_DRV "i915_drv"
37
36/* Really want an OS-independent resettable timer. Would like to have 38/* Really want an OS-independent resettable timer. Would like to have
37 * this loop run for (eg) 3 sec, but have the timer reset every time 39 * this loop run for (eg) 3 sec, but have the timer reset every time
38 * the head pointer changes, so that EBUSY only happens if the ring 40 * the head pointer changes, so that EBUSY only happens if the ring
@@ -99,7 +101,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
99 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 101 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
100 102
101 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 103 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
102 DRM_DEBUG("Enabled hardware status page\n"); 104 DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
103 return 0; 105 return 0;
104} 106}
105 107
@@ -185,7 +187,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
185 master_priv->sarea_priv = (drm_i915_sarea_t *) 187 master_priv->sarea_priv = (drm_i915_sarea_t *)
186 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 188 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
187 } else { 189 } else {
188 DRM_DEBUG("sarea not found assuming DRI2 userspace\n"); 190 DRM_DEBUG_DRIVER(I915_DRV,
191 "sarea not found assuming DRI2 userspace\n");
189 } 192 }
190 193
191 if (init->ring_size != 0) { 194 if (init->ring_size != 0) {
@@ -235,7 +238,7 @@ static int i915_dma_resume(struct drm_device * dev)
235{ 238{
236 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 239 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
237 240
238 DRM_DEBUG("%s\n", __func__); 241 DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
239 242
240 if (dev_priv->ring.map.handle == NULL) { 243 if (dev_priv->ring.map.handle == NULL) {
241 DRM_ERROR("can not ioremap virtual address for" 244 DRM_ERROR("can not ioremap virtual address for"
@@ -248,13 +251,14 @@ static int i915_dma_resume(struct drm_device * dev)
248 DRM_ERROR("Can not find hardware status page\n"); 251 DRM_ERROR("Can not find hardware status page\n");
249 return -EINVAL; 252 return -EINVAL;
250 } 253 }
251 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 254 DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
255 dev_priv->hw_status_page);
252 256
253 if (dev_priv->status_gfx_addr != 0) 257 if (dev_priv->status_gfx_addr != 0)
254 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 258 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
255 else 259 else
256 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 260 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
257 DRM_DEBUG("Enabled hardware status page\n"); 261 DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
258 262
259 return 0; 263 return 0;
260} 264}
@@ -548,10 +552,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
548 if (!master_priv->sarea_priv) 552 if (!master_priv->sarea_priv)
549 return -EINVAL; 553 return -EINVAL;
550 554
551 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 555 DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
552 __func__, 556 __func__,
553 dev_priv->current_page, 557 dev_priv->current_page,
554 master_priv->sarea_priv->pf_current_page); 558 master_priv->sarea_priv->pf_current_page);
555 559
556 i915_kernel_lost_context(dev); 560 i915_kernel_lost_context(dev);
557 561
@@ -629,8 +633,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
629 return -EINVAL; 633 return -EINVAL;
630 } 634 }
631 635
632 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 636 DRM_DEBUG_DRIVER(I915_DRV,
633 batch->start, batch->used, batch->num_cliprects); 637 "i915 batchbuffer, start %x used %d cliprects %d\n",
638 batch->start, batch->used, batch->num_cliprects);
634 639
635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 640 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
636 641
@@ -678,8 +683,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
678 void *batch_data; 683 void *batch_data;
679 int ret; 684 int ret;
680 685
681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 686 DRM_DEBUG_DRIVER(I915_DRV,
682 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 687 "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
688 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
683 689
684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 690 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
685 691
@@ -734,7 +740,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
734{ 740{
735 int ret; 741 int ret;
736 742
737 DRM_DEBUG("%s\n", __func__); 743 DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
738 744
739 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 745 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
740 746
@@ -777,7 +783,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
777 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 783 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
778 break; 784 break;
779 default: 785 default:
780 DRM_DEBUG("Unknown parameter %d\n", param->param); 786 DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
787 param->param);
781 return -EINVAL; 788 return -EINVAL;
782 } 789 }
783 790
@@ -817,7 +824,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
817 dev_priv->fence_reg_start = param->value; 824 dev_priv->fence_reg_start = param->value;
818 break; 825 break;
819 default: 826 default:
820 DRM_DEBUG("unknown parameter %d\n", param->param); 827 DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
828 param->param);
821 return -EINVAL; 829 return -EINVAL;
822 } 830 }
823 831
@@ -865,9 +873,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
865 873
866 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 874 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
867 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 875 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
868 DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", 876 DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
869 dev_priv->status_gfx_addr); 877 dev_priv->status_gfx_addr);
870 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 878 DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
879 dev_priv->hw_status_page);
871 return 0; 880 return 0;
872} 881}
873 882
@@ -922,7 +931,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
922 * Some of the preallocated space is taken by the GTT 931 * Some of the preallocated space is taken by the GTT
923 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 932 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
924 */ 933 */
925 if (IS_G4X(dev) || IS_IGD(dev)) 934 if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
926 overhead = 4096; 935 overhead = 4096;
927 else 936 else
928 overhead = (*aperture_size / 1024) + 4096; 937 overhead = (*aperture_size / 1024) + 4096;
@@ -1153,8 +1162,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1153#endif 1162#endif
1154 1163
1155 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1164 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1156 if (IS_GM45(dev)) 1165 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1166 if (IS_G4X(dev) || IS_IGDNG(dev)) {
1167 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1157 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1168 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1169 }
1158 1170
1159 i915_gem_load(dev); 1171 i915_gem_load(dev);
1160 1172
@@ -1198,7 +1210,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1198 } 1210 }
1199 1211
1200 /* Must be done after probing outputs */ 1212 /* Must be done after probing outputs */
1201 intel_opregion_init(dev, 0); 1213 /* FIXME: verify on IGDNG */
1214 if (!IS_IGDNG(dev))
1215 intel_opregion_init(dev, 0);
1202 1216
1203 return 0; 1217 return 0;
1204 1218
@@ -1232,7 +1246,8 @@ int i915_driver_unload(struct drm_device *dev)
1232 if (dev_priv->regs != NULL) 1246 if (dev_priv->regs != NULL)
1233 iounmap(dev_priv->regs); 1247 iounmap(dev_priv->regs);
1234 1248
1235 intel_opregion_free(dev, 0); 1249 if (!IS_IGDNG(dev))
1250 intel_opregion_free(dev, 0);
1236 1251
1237 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1252 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1238 intel_modeset_cleanup(dev); 1253 intel_modeset_cleanup(dev);
@@ -1256,7 +1271,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1256{ 1271{
1257 struct drm_i915_file_private *i915_file_priv; 1272 struct drm_i915_file_private *i915_file_priv;
1258 1273
1259 DRM_DEBUG("\n"); 1274 DRM_DEBUG_DRIVER(I915_DRV, "\n");
1260 i915_file_priv = (struct drm_i915_file_private *) 1275 i915_file_priv = (struct drm_i915_file_private *)
1261 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 1276 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
1262 1277
@@ -1265,8 +1280,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1265 1280
1266 file_priv->driver_priv = i915_file_priv; 1281 file_priv->driver_priv = i915_file_priv;
1267 1282
1268 i915_file_priv->mm.last_gem_seqno = 0; 1283 INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1269 i915_file_priv->mm.last_gem_throttle_seqno = 0;
1270 1284
1271 return 0; 1285 return 0;
1272} 1286}
@@ -1303,6 +1317,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1303void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1317void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1304{ 1318{
1305 drm_i915_private_t *dev_priv = dev->dev_private; 1319 drm_i915_private_t *dev_priv = dev->dev_private;
1320 i915_gem_release(dev, file_priv);
1306 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1321 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1307 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 1322 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1308} 1323}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c431fa54bbb5..8ef6bcec211b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,13 @@ struct drm_i915_fence_reg {
126 struct drm_gem_object *obj; 126 struct drm_gem_object *obj;
127}; 127};
128 128
129struct sdvo_device_mapping {
130 u8 dvo_port;
131 u8 slave_addr;
132 u8 dvo_wiring;
133 u8 initialized;
134};
135
129typedef struct drm_i915_private { 136typedef struct drm_i915_private {
130 struct drm_device *dev; 137 struct drm_device *dev;
131 138
@@ -143,6 +150,8 @@ typedef struct drm_i915_private {
143 drm_local_map_t hws_map; 150 drm_local_map_t hws_map;
144 struct drm_gem_object *hws_obj; 151 struct drm_gem_object *hws_obj;
145 152
153 struct resource mch_res;
154
146 unsigned int cpp; 155 unsigned int cpp;
147 int back_offset; 156 int back_offset;
148 int front_offset; 157 int front_offset;
@@ -158,6 +167,11 @@ typedef struct drm_i915_private {
158 /** Cached value of IMR to avoid reads in updating the bitfield */ 167 /** Cached value of IMR to avoid reads in updating the bitfield */
159 u32 irq_mask_reg; 168 u32 irq_mask_reg;
160 u32 pipestat[2]; 169 u32 pipestat[2];
170 /** splitted irq regs for graphics and display engine on IGDNG,
171 irq_mask_reg is still used for display irq. */
172 u32 gt_irq_mask_reg;
173 u32 gt_irq_enable_reg;
174 u32 de_irq_enable_reg;
161 175
162 u32 hotplug_supported_mask; 176 u32 hotplug_supported_mask;
163 struct work_struct hotplug_work; 177 struct work_struct hotplug_work;
@@ -285,6 +299,13 @@ typedef struct drm_i915_private {
285 u8 saveDACMASK; 299 u8 saveDACMASK;
286 u8 saveCR[37]; 300 u8 saveCR[37];
287 uint64_t saveFENCE[16]; 301 uint64_t saveFENCE[16];
302 u32 saveCURACNTR;
303 u32 saveCURAPOS;
304 u32 saveCURABASE;
305 u32 saveCURBCNTR;
306 u32 saveCURBPOS;
307 u32 saveCURBBASE;
308 u32 saveCURSIZE;
288 309
289 struct { 310 struct {
290 struct drm_mm gtt_space; 311 struct drm_mm gtt_space;
@@ -382,6 +403,7 @@ typedef struct drm_i915_private {
382 /* storage for physical objects */ 403 /* storage for physical objects */
383 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 404 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
384 } mm; 405 } mm;
406 struct sdvo_device_mapping sdvo_mappings[2];
385} drm_i915_private_t; 407} drm_i915_private_t;
386 408
387/** driver private structure attached to each drm_gem_object */ 409/** driver private structure attached to each drm_gem_object */
@@ -491,13 +513,16 @@ struct drm_i915_gem_request {
491 /** Time at which this request was emitted, in jiffies. */ 513 /** Time at which this request was emitted, in jiffies. */
492 unsigned long emitted_jiffies; 514 unsigned long emitted_jiffies;
493 515
516 /** global list entry for this request */
494 struct list_head list; 517 struct list_head list;
518
519 /** file_priv list entry for this request */
520 struct list_head client_list;
495}; 521};
496 522
497struct drm_i915_file_private { 523struct drm_i915_file_private {
498 struct { 524 struct {
499 uint32_t last_gem_seqno; 525 struct list_head request_list;
500 uint32_t last_gem_throttle_seqno;
501 } mm; 526 } mm;
502}; 527};
503 528
@@ -642,6 +667,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
642void i915_gem_free_all_phys_object(struct drm_device *dev); 667void i915_gem_free_all_phys_object(struct drm_device *dev);
643int i915_gem_object_get_pages(struct drm_gem_object *obj); 668int i915_gem_object_get_pages(struct drm_gem_object *obj);
644void i915_gem_object_put_pages(struct drm_gem_object *obj); 669void i915_gem_object_put_pages(struct drm_gem_object *obj);
670void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
645 671
646/* i915_gem_tiling.c */ 672/* i915_gem_tiling.c */
647void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 673void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -785,7 +811,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
785 (dev)->pci_device == 0x2E02 || \ 811 (dev)->pci_device == 0x2E02 || \
786 (dev)->pci_device == 0x2E12 || \ 812 (dev)->pci_device == 0x2E12 || \
787 (dev)->pci_device == 0x2E22 || \ 813 (dev)->pci_device == 0x2E22 || \
788 (dev)->pci_device == 0x2E32) 814 (dev)->pci_device == 0x2E32 || \
815 (dev)->pci_device == 0x0042 || \
816 (dev)->pci_device == 0x0046)
789 817
790#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ 818#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
791 (dev)->pci_device == 0x2A12) 819 (dev)->pci_device == 0x2A12)
@@ -807,20 +835,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
807 (dev)->pci_device == 0x29D2 || \ 835 (dev)->pci_device == 0x29D2 || \
808 (IS_IGD(dev))) 836 (IS_IGD(dev)))
809 837
838#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
839#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
840#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
841
810#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 842#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
811 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 843 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
844 IS_IGDNG(dev))
812 845
813#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 846#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
814 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ 847 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
815 IS_IGD(dev)) 848 IS_IGD(dev) || IS_IGDNG_M(dev))
816 849
817#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 850#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
851 IS_IGDNG(dev))
818/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 852/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
819 * rows, which changed the alignment requirements and fence programming. 853 * rows, which changed the alignment requirements and fence programming.
820 */ 854 */
821#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 855#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
822 IS_I915GM(dev))) 856 IS_I915GM(dev)))
823#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) 857#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
824#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) 858#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
825 859
826#define PRIMARY_RINGBUFFER_SIZE (128*1024) 860#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 39f5c658ef5e..c0ae6bbbd9b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
989 return -ENODEV; 989 return -ENODEV;
990 990
991 /* Only handle setting domains to types used by the CPU. */ 991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 992 if (write_domain & I915_GEM_GPU_DOMAINS)
993 return -EINVAL; 993 return -EINVAL;
994 994
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 995 if (read_domains & I915_GEM_GPU_DOMAINS)
996 return -EINVAL; 996 return -EINVAL;
997 997
998 /* Having something in the write domain implies it's in the read 998 /* Having something in the write domain implies it's in the read
@@ -1481,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1481 * Returned sequence numbers are nonzero on success. 1481 * Returned sequence numbers are nonzero on success.
1482 */ 1482 */
1483static uint32_t 1483static uint32_t
1484i915_add_request(struct drm_device *dev, uint32_t flush_domains) 1484i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1485 uint32_t flush_domains)
1485{ 1486{
1486 drm_i915_private_t *dev_priv = dev->dev_private; 1487 drm_i915_private_t *dev_priv = dev->dev_private;
1488 struct drm_i915_file_private *i915_file_priv = NULL;
1487 struct drm_i915_gem_request *request; 1489 struct drm_i915_gem_request *request;
1488 uint32_t seqno; 1490 uint32_t seqno;
1489 int was_empty; 1491 int was_empty;
1490 RING_LOCALS; 1492 RING_LOCALS;
1491 1493
1494 if (file_priv != NULL)
1495 i915_file_priv = file_priv->driver_priv;
1496
1492 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 1497 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1493 if (request == NULL) 1498 if (request == NULL)
1494 return 0; 1499 return 0;
@@ -1515,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1515 request->emitted_jiffies = jiffies; 1520 request->emitted_jiffies = jiffies;
1516 was_empty = list_empty(&dev_priv->mm.request_list); 1521 was_empty = list_empty(&dev_priv->mm.request_list);
1517 list_add_tail(&request->list, &dev_priv->mm.request_list); 1522 list_add_tail(&request->list, &dev_priv->mm.request_list);
1523 if (i915_file_priv) {
1524 list_add_tail(&request->client_list,
1525 &i915_file_priv->mm.request_list);
1526 } else {
1527 INIT_LIST_HEAD(&request->client_list);
1528 }
1518 1529
1519 /* Associate any objects on the flushing list matching the write 1530 /* Associate any objects on the flushing list matching the write
1520 * domain we're flushing with our flush. 1531 * domain we're flushing with our flush.
@@ -1664,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1664 i915_gem_retire_request(dev, request); 1675 i915_gem_retire_request(dev, request);
1665 1676
1666 list_del(&request->list); 1677 list_del(&request->list);
1678 list_del(&request->client_list);
1667 drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 1679 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1668 } else 1680 } else
1669 break; 1681 break;
@@ -1702,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1702 BUG_ON(seqno == 0); 1714 BUG_ON(seqno == 0);
1703 1715
1704 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1716 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1705 ier = I915_READ(IER); 1717 if (IS_IGDNG(dev))
1718 ier = I915_READ(DEIER) | I915_READ(GTIER);
1719 else
1720 ier = I915_READ(IER);
1706 if (!ier) { 1721 if (!ier) {
1707 DRM_ERROR("something (likely vbetool) disabled " 1722 DRM_ERROR("something (likely vbetool) disabled "
1708 "interrupts, re-enabling\n"); 1723 "interrupts, re-enabling\n");
@@ -1754,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev,
1754 if (flush_domains & I915_GEM_DOMAIN_CPU) 1769 if (flush_domains & I915_GEM_DOMAIN_CPU)
1755 drm_agp_chipset_flush(dev); 1770 drm_agp_chipset_flush(dev);
1756 1771
1757 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | 1772 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1758 I915_GEM_DOMAIN_GTT)) {
1759 /* 1773 /*
1760 * read/write caches: 1774 * read/write caches:
1761 * 1775 *
@@ -1977,7 +1991,7 @@ i915_gem_evict_something(struct drm_device *dev)
1977 i915_gem_flush(dev, 1991 i915_gem_flush(dev,
1978 obj->write_domain, 1992 obj->write_domain,
1979 obj->write_domain); 1993 obj->write_domain);
1980 i915_add_request(dev, obj->write_domain); 1994 i915_add_request(dev, NULL, obj->write_domain);
1981 1995
1982 obj = NULL; 1996 obj = NULL;
1983 continue; 1997 continue;
@@ -1991,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev)
1991 /* If we didn't do any of the above, there's nothing to be done 2005 /* If we didn't do any of the above, there's nothing to be done
1992 * and we just can't fit it in. 2006 * and we just can't fit it in.
1993 */ 2007 */
1994 return -ENOMEM; 2008 return -ENOSPC;
1995 } 2009 }
1996 return ret; 2010 return ret;
1997} 2011}
@@ -2006,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev)
2006 if (ret != 0) 2020 if (ret != 0)
2007 break; 2021 break;
2008 } 2022 }
2009 if (ret == -ENOMEM) 2023 if (ret == -ENOSPC)
2010 return 0; 2024 return 0;
2011 return ret; 2025 return ret;
2012} 2026}
@@ -2215,7 +2229,7 @@ try_again:
2215 loff_t offset; 2229 loff_t offset;
2216 2230
2217 if (avail == 0) 2231 if (avail == 0)
2218 return -ENOMEM; 2232 return -ENOSPC;
2219 2233
2220 for (i = dev_priv->fence_reg_start; 2234 for (i = dev_priv->fence_reg_start;
2221 i < dev_priv->num_fence_regs; i++) { 2235 i < dev_priv->num_fence_regs; i++) {
@@ -2248,7 +2262,7 @@ try_again:
2248 i915_gem_flush(dev, 2262 i915_gem_flush(dev,
2249 I915_GEM_GPU_DOMAINS, 2263 I915_GEM_GPU_DOMAINS,
2250 I915_GEM_GPU_DOMAINS); 2264 I915_GEM_GPU_DOMAINS);
2251 seqno = i915_add_request(dev, 2265 seqno = i915_add_request(dev, NULL,
2252 I915_GEM_GPU_DOMAINS); 2266 I915_GEM_GPU_DOMAINS);
2253 if (seqno == 0) 2267 if (seqno == 0)
2254 return -ENOMEM; 2268 return -ENOMEM;
@@ -2364,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2364 spin_unlock(&dev_priv->mm.active_list_lock); 2378 spin_unlock(&dev_priv->mm.active_list_lock);
2365 if (lists_empty) { 2379 if (lists_empty) {
2366 DRM_ERROR("GTT full, but LRU list empty\n"); 2380 DRM_ERROR("GTT full, but LRU list empty\n");
2367 return -ENOMEM; 2381 return -ENOSPC;
2368 } 2382 }
2369 2383
2370 ret = i915_gem_evict_something(dev); 2384 ret = i915_gem_evict_something(dev);
@@ -2409,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2409 * wasn't in the GTT, there shouldn't be any way it could have been in 2423 * wasn't in the GTT, there shouldn't be any way it could have been in
2410 * a GPU cache 2424 * a GPU cache
2411 */ 2425 */
2412 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2426 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2413 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2427 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2414 2428
2415 return 0; 2429 return 0;
2416} 2430}
@@ -2452,7 +2466,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2452 2466
2453 /* Queue the GPU write cache flushing we need. */ 2467 /* Queue the GPU write cache flushing we need. */
2454 i915_gem_flush(dev, 0, obj->write_domain); 2468 i915_gem_flush(dev, 0, obj->write_domain);
2455 seqno = i915_add_request(dev, obj->write_domain); 2469 seqno = i915_add_request(dev, NULL, obj->write_domain);
2456 obj->write_domain = 0; 2470 obj->write_domain = 0;
2457 i915_gem_object_move_to_active(obj, seqno); 2471 i915_gem_object_move_to_active(obj, seqno);
2458} 2472}
@@ -3035,20 +3049,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3035 drm_i915_private_t *dev_priv = dev->dev_private; 3049 drm_i915_private_t *dev_priv = dev->dev_private;
3036 int nbox = exec->num_cliprects; 3050 int nbox = exec->num_cliprects;
3037 int i = 0, count; 3051 int i = 0, count;
3038 uint32_t exec_start, exec_len; 3052 uint32_t exec_start, exec_len;
3039 RING_LOCALS; 3053 RING_LOCALS;
3040 3054
3041 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3055 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3042 exec_len = (uint32_t) exec->batch_len; 3056 exec_len = (uint32_t) exec->batch_len;
3043 3057
3044 if ((exec_start | exec_len) & 0x7) {
3045 DRM_ERROR("alignment\n");
3046 return -EINVAL;
3047 }
3048
3049 if (!exec_start)
3050 return -EINVAL;
3051
3052 count = nbox ? nbox : 1; 3058 count = nbox ? nbox : 1;
3053 3059
3054 for (i = 0; i < count; i++) { 3060 for (i = 0; i < count; i++) {
@@ -3089,6 +3095,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3089/* Throttle our rendering by waiting until the ring has completed our requests 3095/* Throttle our rendering by waiting until the ring has completed our requests
3090 * emitted over 20 msec ago. 3096 * emitted over 20 msec ago.
3091 * 3097 *
3098 * Note that if we were to use the current jiffies each time around the loop,
3099 * we wouldn't escape the function with any frames outstanding if the time to
3100 * render a frame was over 20ms.
3101 *
3092 * This should get us reasonable parallelism between CPU and GPU but also 3102 * This should get us reasonable parallelism between CPU and GPU but also
3093 * relatively low latency when blocking on a particular request to finish. 3103 * relatively low latency when blocking on a particular request to finish.
3094 */ 3104 */
@@ -3097,15 +3107,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3097{ 3107{
3098 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 3108 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3099 int ret = 0; 3109 int ret = 0;
3100 uint32_t seqno; 3110 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3101 3111
3102 mutex_lock(&dev->struct_mutex); 3112 mutex_lock(&dev->struct_mutex);
3103 seqno = i915_file_priv->mm.last_gem_throttle_seqno; 3113 while (!list_empty(&i915_file_priv->mm.request_list)) {
3104 i915_file_priv->mm.last_gem_throttle_seqno = 3114 struct drm_i915_gem_request *request;
3105 i915_file_priv->mm.last_gem_seqno; 3115
3106 if (seqno) 3116 request = list_first_entry(&i915_file_priv->mm.request_list,
3107 ret = i915_wait_request(dev, seqno); 3117 struct drm_i915_gem_request,
3118 client_list);
3119
3120 if (time_after_eq(request->emitted_jiffies, recent_enough))
3121 break;
3122
3123 ret = i915_wait_request(dev, request->seqno);
3124 if (ret != 0)
3125 break;
3126 }
3108 mutex_unlock(&dev->struct_mutex); 3127 mutex_unlock(&dev->struct_mutex);
3128
3109 return ret; 3129 return ret;
3110} 3130}
3111 3131
@@ -3182,12 +3202,29 @@ err:
3182 return ret; 3202 return ret;
3183} 3203}
3184 3204
3205static int
3206i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3207 uint64_t exec_offset)
3208{
3209 uint32_t exec_start, exec_len;
3210
3211 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3212 exec_len = (uint32_t) exec->batch_len;
3213
3214 if ((exec_start | exec_len) & 0x7)
3215 return -EINVAL;
3216
3217 if (!exec_start)
3218 return -EINVAL;
3219
3220 return 0;
3221}
3222
3185int 3223int
3186i915_gem_execbuffer(struct drm_device *dev, void *data, 3224i915_gem_execbuffer(struct drm_device *dev, void *data,
3187 struct drm_file *file_priv) 3225 struct drm_file *file_priv)
3188{ 3226{
3189 drm_i915_private_t *dev_priv = dev->dev_private; 3227 drm_i915_private_t *dev_priv = dev->dev_private;
3190 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3191 struct drm_i915_gem_execbuffer *args = data; 3228 struct drm_i915_gem_execbuffer *args = data;
3192 struct drm_i915_gem_exec_object *exec_list = NULL; 3229 struct drm_i915_gem_exec_object *exec_list = NULL;
3193 struct drm_gem_object **object_list = NULL; 3230 struct drm_gem_object **object_list = NULL;
@@ -3312,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3312 break; 3349 break;
3313 3350
3314 /* error other than GTT full, or we've already tried again */ 3351 /* error other than GTT full, or we've already tried again */
3315 if (ret != -ENOMEM || pin_tries >= 1) { 3352 if (ret != -ENOSPC || pin_tries >= 1) {
3316 if (ret != -ERESTARTSYS) 3353 if (ret != -ERESTARTSYS)
3317 DRM_ERROR("Failed to pin buffers %d\n", ret); 3354 DRM_ERROR("Failed to pin buffers %d\n", ret);
3318 goto err; 3355 goto err;
@@ -3331,8 +3368,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3331 3368
3332 /* Set the pending read domains for the batch buffer to COMMAND */ 3369 /* Set the pending read domains for the batch buffer to COMMAND */
3333 batch_obj = object_list[args->buffer_count-1]; 3370 batch_obj = object_list[args->buffer_count-1];
3334 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; 3371 if (batch_obj->pending_write_domain) {
3335 batch_obj->pending_write_domain = 0; 3372 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3373 ret = -EINVAL;
3374 goto err;
3375 }
3376 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3377
3378 /* Sanity check the batch buffer, prior to moving objects */
3379 exec_offset = exec_list[args->buffer_count - 1].offset;
3380 ret = i915_gem_check_execbuffer (args, exec_offset);
3381 if (ret != 0) {
3382 DRM_ERROR("execbuf with invalid offset/length\n");
3383 goto err;
3384 }
3336 3385
3337 i915_verify_inactive(dev, __FILE__, __LINE__); 3386 i915_verify_inactive(dev, __FILE__, __LINE__);
3338 3387
@@ -3363,7 +3412,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3363 dev->invalidate_domains, 3412 dev->invalidate_domains,
3364 dev->flush_domains); 3413 dev->flush_domains);
3365 if (dev->flush_domains) 3414 if (dev->flush_domains)
3366 (void)i915_add_request(dev, dev->flush_domains); 3415 (void)i915_add_request(dev, file_priv,
3416 dev->flush_domains);
3367 } 3417 }
3368 3418
3369 for (i = 0; i < args->buffer_count; i++) { 3419 for (i = 0; i < args->buffer_count; i++) {
@@ -3381,8 +3431,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3381 } 3431 }
3382#endif 3432#endif
3383 3433
3384 exec_offset = exec_list[args->buffer_count - 1].offset;
3385
3386#if WATCH_EXEC 3434#if WATCH_EXEC
3387 i915_gem_dump_object(batch_obj, 3435 i915_gem_dump_object(batch_obj,
3388 args->batch_len, 3436 args->batch_len,
@@ -3412,9 +3460,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3412 * *some* interrupts representing completion of buffers that we can 3460 * *some* interrupts representing completion of buffers that we can
3413 * wait on when trying to clear up gtt space). 3461 * wait on when trying to clear up gtt space).
3414 */ 3462 */
3415 seqno = i915_add_request(dev, flush_domains); 3463 seqno = i915_add_request(dev, file_priv, flush_domains);
3416 BUG_ON(seqno == 0); 3464 BUG_ON(seqno == 0);
3417 i915_file_priv->mm.last_gem_seqno = seqno;
3418 for (i = 0; i < args->buffer_count; i++) { 3465 for (i = 0; i < args->buffer_count; i++) {
3419 struct drm_gem_object *obj = object_list[i]; 3466 struct drm_gem_object *obj = object_list[i];
3420 3467
@@ -3520,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3520 atomic_inc(&dev->pin_count); 3567 atomic_inc(&dev->pin_count);
3521 atomic_add(obj->size, &dev->pin_memory); 3568 atomic_add(obj->size, &dev->pin_memory);
3522 if (!obj_priv->active && 3569 if (!obj_priv->active &&
3523 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3570 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3524 I915_GEM_DOMAIN_GTT)) == 0 &&
3525 !list_empty(&obj_priv->list)) 3571 !list_empty(&obj_priv->list))
3526 list_del_init(&obj_priv->list); 3572 list_del_init(&obj_priv->list);
3527 } 3573 }
@@ -3548,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
3548 */ 3594 */
3549 if (obj_priv->pin_count == 0) { 3595 if (obj_priv->pin_count == 0) {
3550 if (!obj_priv->active && 3596 if (!obj_priv->active &&
3551 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3597 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3552 I915_GEM_DOMAIN_GTT)) == 0)
3553 list_move_tail(&obj_priv->list, 3598 list_move_tail(&obj_priv->list,
3554 &dev_priv->mm.inactive_list); 3599 &dev_priv->mm.inactive_list);
3555 atomic_dec(&dev->pin_count); 3600 atomic_dec(&dev->pin_count);
@@ -3653,15 +3698,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3653 struct drm_gem_object *obj; 3698 struct drm_gem_object *obj;
3654 struct drm_i915_gem_object *obj_priv; 3699 struct drm_i915_gem_object *obj_priv;
3655 3700
3656 mutex_lock(&dev->struct_mutex);
3657 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3701 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3658 if (obj == NULL) { 3702 if (obj == NULL) {
3659 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", 3703 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3660 args->handle); 3704 args->handle);
3661 mutex_unlock(&dev->struct_mutex);
3662 return -EBADF; 3705 return -EBADF;
3663 } 3706 }
3664 3707
3708 mutex_lock(&dev->struct_mutex);
3665 /* Update the active list for the hardware's current position. 3709 /* Update the active list for the hardware's current position.
3666 * Otherwise this only updates on a delayed timer or when irqs are 3710 * Otherwise this only updates on a delayed timer or when irqs are
3667 * actually unmasked, and our working set ends up being larger than 3711 * actually unmasked, and our working set ends up being larger than
@@ -3800,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev)
3800 3844
3801 /* Flush the GPU along with all non-CPU write domains 3845 /* Flush the GPU along with all non-CPU write domains
3802 */ 3846 */
3803 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 3847 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3804 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 3848 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
3805 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3806 3849
3807 if (seqno == 0) { 3850 if (seqno == 0) {
3808 mutex_unlock(&dev->struct_mutex); 3851 mutex_unlock(&dev->struct_mutex);
@@ -4352,3 +4395,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4352 drm_agp_chipset_flush(dev); 4395 drm_agp_chipset_flush(dev);
4353 return 0; 4396 return 0;
4354} 4397}
4398
4399void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4400{
4401 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4402
4403 /* Clean up our request list when the client is going away, so that
4404 * later retire_requests won't dereference our soon-to-be-gone
4405 * file_priv.
4406 */
4407 mutex_lock(&dev->struct_mutex);
4408 while (!list_empty(&i915_file_priv->mm.request_list))
4409 list_del_init(i915_file_priv->mm.request_list.next);
4410 mutex_unlock(&dev->struct_mutex);
4411}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 540dd336e6ec..9a05cadaa4ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
28#include "linux/string.h" 30#include "linux/string.h"
29#include "linux/bitops.h" 31#include "linux/bitops.h"
30#include "drmP.h" 32#include "drmP.h"
@@ -81,6 +83,143 @@
81 * to match what the GPU expects. 83 * to match what the GPU expects.
82 */ 84 */
83 85
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 struct pci_dev *bridge_dev;
98 drm_i915_private_t *dev_priv = dev->dev_private;
99 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
100 u32 temp_lo, temp_hi = 0;
101 u64 mchbar_addr;
102 int ret = 0;
103
104 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
105 if (!bridge_dev) {
106 DRM_DEBUG("no bridge dev?!\n");
107 ret = -ENODEV;
108 goto out;
109 }
110
111 if (IS_I965G(dev))
112 pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
113 pci_read_config_dword(bridge_dev, reg, &temp_lo);
114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
115
116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
117 if (mchbar_addr &&
118 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
119 ret = 0;
120 goto out_put;
121 }
122
123 /* Get some space for it */
124 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
125 MCHBAR_SIZE, MCHBAR_SIZE,
126 PCIBIOS_MIN_MEM,
127 0, pcibios_align_resource,
128 bridge_dev);
129 if (ret) {
130 DRM_DEBUG("failed bus alloc: %d\n", ret);
131 dev_priv->mch_res.start = 0;
132 goto out_put;
133 }
134
135 if (IS_I965G(dev))
136 pci_write_config_dword(bridge_dev, reg + 4,
137 upper_32_bits(dev_priv->mch_res.start));
138
139 pci_write_config_dword(bridge_dev, reg,
140 lower_32_bits(dev_priv->mch_res.start));
141out_put:
142 pci_dev_put(bridge_dev);
143out:
144 return ret;
145}
146
147/* Setup MCHBAR if possible, return true if we should disable it again */
148static bool
149intel_setup_mchbar(struct drm_device *dev)
150{
151 struct pci_dev *bridge_dev;
152 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
153 u32 temp;
154 bool need_disable = false, enabled;
155
156 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
157 if (!bridge_dev) {
158 DRM_DEBUG("no bridge dev?!\n");
159 goto out;
160 }
161
162 if (IS_I915G(dev) || IS_I915GM(dev)) {
163 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
164 enabled = !!(temp & DEVEN_MCHBAR_EN);
165 } else {
166 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
167 enabled = temp & 1;
168 }
169
170 /* If it's already enabled, don't have to do anything */
171 if (enabled)
172 goto out_put;
173
174 if (intel_alloc_mchbar_resource(dev))
175 goto out_put;
176
177 need_disable = true;
178
179 /* Space is allocated or reserved, so enable it. */
180 if (IS_I915G(dev) || IS_I915GM(dev)) {
181 pci_write_config_dword(bridge_dev, DEVEN_REG,
182 temp | DEVEN_MCHBAR_EN);
183 } else {
184 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
185 pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
186 }
187out_put:
188 pci_dev_put(bridge_dev);
189out:
190 return need_disable;
191}
192
193static void
194intel_teardown_mchbar(struct drm_device *dev, bool disable)
195{
196 drm_i915_private_t *dev_priv = dev->dev_private;
197 struct pci_dev *bridge_dev;
198 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
199 u32 temp;
200
201 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
202 if (!bridge_dev) {
203 DRM_DEBUG("no bridge dev?!\n");
204 return;
205 }
206
207 if (disable) {
208 if (IS_I915G(dev) || IS_I915GM(dev)) {
209 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
210 temp &= ~DEVEN_MCHBAR_EN;
211 pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
212 } else {
213 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
214 temp &= ~1;
215 pci_write_config_dword(bridge_dev, mchbar_reg, temp);
216 }
217 }
218
219 if (dev_priv->mch_res.start)
220 release_resource(&dev_priv->mch_res);
221}
222
84/** 223/**
85 * Detects bit 6 swizzling of address lookup between IGD access and CPU 224 * Detects bit 6 swizzling of address lookup between IGD access and CPU
86 * access through main memory. 225 * access through main memory.
@@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 drm_i915_private_t *dev_priv = dev->dev_private; 230 drm_i915_private_t *dev_priv = dev->dev_private;
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 231 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 232 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
233 bool need_disable;
94 234
95 if (!IS_I9XX(dev)) { 235 if (!IS_I9XX(dev)) {
96 /* As far as we know, the 865 doesn't have these bit 6 236 /* As far as we know, the 865 doesn't have these bit 6
@@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
101 } else if (IS_MOBILE(dev)) { 241 } else if (IS_MOBILE(dev)) {
102 uint32_t dcc; 242 uint32_t dcc;
103 243
244 /* Try to make sure MCHBAR is enabled before poking at it */
245 need_disable = intel_setup_mchbar(dev);
246
104 /* On mobile 9xx chipsets, channel interleave by the CPU is 247 /* On mobile 9xx chipsets, channel interleave by the CPU is
105 * determined by DCC. For single-channel, neither the CPU 248 * determined by DCC. For single-channel, neither the CPU
106 * nor the GPU do swizzling. For dual channel interleaved, 249 * nor the GPU do swizzling. For dual channel interleaved,
@@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
140 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 283 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
141 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 284 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
142 } 285 }
286
287 intel_teardown_mchbar(dev, need_disable);
143 } else { 288 } else {
144 /* The 965, G33, and newer, have a very flexible memory 289 /* The 965, G33, and newer, have a very flexible memory
145 * configuration. It will enable dual-channel mode 290 * configuration. It will enable dual-channel mode
@@ -170,6 +315,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
170 } 315 }
171 } 316 }
172 317
318 /* FIXME: check with memory config on IGDNG */
319 if (IS_IGDNG(dev)) {
320 DRM_ERROR("disable tiling on IGDNG...\n");
321 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
322 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
323 }
324
173 dev_priv->mm.bit_6_swizzle_x = swizzle_x; 325 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
174 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 326 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
175} 327}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 98bb4c878c4e..b86b7b7130c6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -58,6 +58,47 @@
58 DRM_I915_VBLANK_PIPE_B) 58 DRM_I915_VBLANK_PIPE_B)
59 59
60void 60void
61igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
62{
63 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
64 dev_priv->gt_irq_mask_reg &= ~mask;
65 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
66 (void) I915_READ(GTIMR);
67 }
68}
69
70static inline void
71igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
72{
73 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
74 dev_priv->gt_irq_mask_reg |= mask;
75 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
76 (void) I915_READ(GTIMR);
77 }
78}
79
80/* For display hotplug interrupt */
81void
82igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
83{
84 if ((dev_priv->irq_mask_reg & mask) != 0) {
85 dev_priv->irq_mask_reg &= ~mask;
86 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
87 (void) I915_READ(DEIMR);
88 }
89}
90
91static inline void
92igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
93{
94 if ((dev_priv->irq_mask_reg & mask) != mask) {
95 dev_priv->irq_mask_reg |= mask;
96 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
97 (void) I915_READ(DEIMR);
98 }
99}
100
101void
61i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 102i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
62{ 103{
63 if ((dev_priv->irq_mask_reg & mask) != 0) { 104 if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -196,6 +237,47 @@ static void i915_hotplug_work_func(struct work_struct *work)
196 drm_sysfs_hotplug_event(dev); 237 drm_sysfs_hotplug_event(dev);
197} 238}
198 239
240irqreturn_t igdng_irq_handler(struct drm_device *dev)
241{
242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
243 int ret = IRQ_NONE;
244 u32 de_iir, gt_iir;
245 u32 new_de_iir, new_gt_iir;
246 struct drm_i915_master_private *master_priv;
247
248 de_iir = I915_READ(DEIIR);
249 gt_iir = I915_READ(GTIIR);
250
251 for (;;) {
252 if (de_iir == 0 && gt_iir == 0)
253 break;
254
255 ret = IRQ_HANDLED;
256
257 I915_WRITE(DEIIR, de_iir);
258 new_de_iir = I915_READ(DEIIR);
259 I915_WRITE(GTIIR, gt_iir);
260 new_gt_iir = I915_READ(GTIIR);
261
262 if (dev->primary->master) {
263 master_priv = dev->primary->master->driver_priv;
264 if (master_priv->sarea_priv)
265 master_priv->sarea_priv->last_dispatch =
266 READ_BREADCRUMB(dev_priv);
267 }
268
269 if (gt_iir & GT_USER_INTERRUPT) {
270 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
271 DRM_WAKEUP(&dev_priv->irq_queue);
272 }
273
274 de_iir = new_de_iir;
275 gt_iir = new_gt_iir;
276 }
277
278 return ret;
279}
280
199irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 281irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
200{ 282{
201 struct drm_device *dev = (struct drm_device *) arg; 283 struct drm_device *dev = (struct drm_device *) arg;
@@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
212 294
213 atomic_inc(&dev_priv->irq_received); 295 atomic_inc(&dev_priv->irq_received);
214 296
297 if (IS_IGDNG(dev))
298 return igdng_irq_handler(dev);
299
215 iir = I915_READ(IIR); 300 iir = I915_READ(IIR);
216 301
217 if (IS_I965G(dev)) { 302 if (IS_I965G(dev)) {
@@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev)
349 unsigned long irqflags; 434 unsigned long irqflags;
350 435
351 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 436 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
352 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 437 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
353 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 438 if (IS_IGDNG(dev))
439 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
440 else
441 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
442 }
354 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 443 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
355} 444}
356 445
@@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev)
361 450
362 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 451 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
363 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 452 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
364 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 453 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
365 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 454 if (IS_IGDNG(dev))
455 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
456 else
457 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
458 }
366 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 459 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
367} 460}
368 461
@@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
455 if (!(pipeconf & PIPEACONF_ENABLE)) 548 if (!(pipeconf & PIPEACONF_ENABLE))
456 return -EINVAL; 549 return -EINVAL;
457 550
551 if (IS_IGDNG(dev))
552 return 0;
553
458 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 554 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
459 if (IS_I965G(dev)) 555 if (IS_I965G(dev))
460 i915_enable_pipestat(dev_priv, pipe, 556 i915_enable_pipestat(dev_priv, pipe,
@@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
474 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
475 unsigned long irqflags; 571 unsigned long irqflags;
476 572
573 if (IS_IGDNG(dev))
574 return;
575
477 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 576 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
478 i915_disable_pipestat(dev_priv, pipe, 577 i915_disable_pipestat(dev_priv, pipe,
479 PIPE_VBLANK_INTERRUPT_ENABLE | 578 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -484,7 +583,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
484void i915_enable_interrupt (struct drm_device *dev) 583void i915_enable_interrupt (struct drm_device *dev)
485{ 584{
486 struct drm_i915_private *dev_priv = dev->dev_private; 585 struct drm_i915_private *dev_priv = dev->dev_private;
487 opregion_enable_asle(dev); 586
587 if (!IS_IGDNG(dev))
588 opregion_enable_asle(dev);
488 dev_priv->irq_enabled = 1; 589 dev_priv->irq_enabled = 1;
489} 590}
490 591
@@ -545,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
545 646
546/* drm_dma.h hooks 647/* drm_dma.h hooks
547*/ 648*/
649static void igdng_irq_preinstall(struct drm_device *dev)
650{
651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
652
653 I915_WRITE(HWSTAM, 0xeffe);
654
655 /* XXX hotplug from PCH */
656
657 I915_WRITE(DEIMR, 0xffffffff);
658 I915_WRITE(DEIER, 0x0);
659 (void) I915_READ(DEIER);
660
661 /* and GT */
662 I915_WRITE(GTIMR, 0xffffffff);
663 I915_WRITE(GTIER, 0x0);
664 (void) I915_READ(GTIER);
665}
666
667static int igdng_irq_postinstall(struct drm_device *dev)
668{
669 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
670 /* enable kind of interrupts always enabled */
671 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
672 u32 render_mask = GT_USER_INTERRUPT;
673
674 dev_priv->irq_mask_reg = ~display_mask;
675 dev_priv->de_irq_enable_reg = display_mask;
676
677 /* should always can generate irq */
678 I915_WRITE(DEIIR, I915_READ(DEIIR));
679 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
680 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
681 (void) I915_READ(DEIER);
682
683 /* user interrupt should be enabled, but masked initial */
684 dev_priv->gt_irq_mask_reg = 0xffffffff;
685 dev_priv->gt_irq_enable_reg = render_mask;
686
687 I915_WRITE(GTIIR, I915_READ(GTIIR));
688 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
689 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
690 (void) I915_READ(GTIER);
691
692 return 0;
693}
694
548void i915_driver_irq_preinstall(struct drm_device * dev) 695void i915_driver_irq_preinstall(struct drm_device * dev)
549{ 696{
550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 697 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
551 698
552 atomic_set(&dev_priv->irq_received, 0); 699 atomic_set(&dev_priv->irq_received, 0);
553 700
701 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
702
703 if (IS_IGDNG(dev)) {
704 igdng_irq_preinstall(dev);
705 return;
706 }
707
554 if (I915_HAS_HOTPLUG(dev)) { 708 if (I915_HAS_HOTPLUG(dev)) {
555 I915_WRITE(PORT_HOTPLUG_EN, 0); 709 I915_WRITE(PORT_HOTPLUG_EN, 0);
556 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 710 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -562,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
562 I915_WRITE(IMR, 0xffffffff); 716 I915_WRITE(IMR, 0xffffffff);
563 I915_WRITE(IER, 0x0); 717 I915_WRITE(IER, 0x0);
564 (void) I915_READ(IER); 718 (void) I915_READ(IER);
565 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
566} 719}
567 720
568int i915_driver_irq_postinstall(struct drm_device *dev) 721int i915_driver_irq_postinstall(struct drm_device *dev)
@@ -570,9 +723,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
571 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 724 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
572 725
726 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
727
573 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 728 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
574 729
575 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 730 if (IS_IGDNG(dev))
731 return igdng_irq_postinstall(dev);
576 732
577 /* Unmask the interrupts that we always want on. */ 733 /* Unmask the interrupts that we always want on. */
578 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 734 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
613 (void) I915_READ(IER); 769 (void) I915_READ(IER);
614 770
615 opregion_enable_asle(dev); 771 opregion_enable_asle(dev);
616 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
617 772
618 return 0; 773 return 0;
619} 774}
620 775
776static void igdng_irq_uninstall(struct drm_device *dev)
777{
778 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
779 I915_WRITE(HWSTAM, 0xffffffff);
780
781 I915_WRITE(DEIMR, 0xffffffff);
782 I915_WRITE(DEIER, 0x0);
783 I915_WRITE(DEIIR, I915_READ(DEIIR));
784
785 I915_WRITE(GTIMR, 0xffffffff);
786 I915_WRITE(GTIER, 0x0);
787 I915_WRITE(GTIIR, I915_READ(GTIIR));
788}
789
621void i915_driver_irq_uninstall(struct drm_device * dev) 790void i915_driver_irq_uninstall(struct drm_device * dev)
622{ 791{
623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 792 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
627 796
628 dev_priv->vblank_pipe = 0; 797 dev_priv->vblank_pipe = 0;
629 798
799 if (IS_IGDNG(dev)) {
800 igdng_irq_uninstall(dev);
801 return;
802 }
803
630 if (I915_HAS_HOTPLUG(dev)) { 804 if (I915_HAS_HOTPLUG(dev)) {
631 I915_WRITE(PORT_HOTPLUG_EN, 0); 805 I915_WRITE(PORT_HOTPLUG_EN, 0);
632 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 806 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 375569d01d01..f6237a0b1133 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,13 @@
450#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 450#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
451#define PLL_REF_INPUT_MASK (3 << 13) 451#define PLL_REF_INPUT_MASK (3 << 13)
452#define PLL_LOAD_PULSE_PHASE_SHIFT 9 452#define PLL_LOAD_PULSE_PHASE_SHIFT 9
453/* IGDNG */
454# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
455# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
456# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
457# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
458# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
459
453/* 460/*
454 * Parallel to Serial Load Pulse phase selection. 461 * Parallel to Serial Load Pulse phase selection.
455 * Selects the phase for the 10X DPLL clock for the PCIe 462 * Selects the phase for the 10X DPLL clock for the PCIe
@@ -631,8 +638,11 @@
631/* Hotplug control (945+ only) */ 638/* Hotplug control (945+ only) */
632#define PORT_HOTPLUG_EN 0x61110 639#define PORT_HOTPLUG_EN 0x61110
633#define HDMIB_HOTPLUG_INT_EN (1 << 29) 640#define HDMIB_HOTPLUG_INT_EN (1 << 29)
641#define DPB_HOTPLUG_INT_EN (1 << 29)
634#define HDMIC_HOTPLUG_INT_EN (1 << 28) 642#define HDMIC_HOTPLUG_INT_EN (1 << 28)
643#define DPC_HOTPLUG_INT_EN (1 << 28)
635#define HDMID_HOTPLUG_INT_EN (1 << 27) 644#define HDMID_HOTPLUG_INT_EN (1 << 27)
645#define DPD_HOTPLUG_INT_EN (1 << 27)
636#define SDVOB_HOTPLUG_INT_EN (1 << 26) 646#define SDVOB_HOTPLUG_INT_EN (1 << 26)
637#define SDVOC_HOTPLUG_INT_EN (1 << 25) 647#define SDVOC_HOTPLUG_INT_EN (1 << 25)
638#define TV_HOTPLUG_INT_EN (1 << 18) 648#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -665,8 +675,11 @@
665 675
666#define PORT_HOTPLUG_STAT 0x61114 676#define PORT_HOTPLUG_STAT 0x61114
667#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 677#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
678#define DPB_HOTPLUG_INT_STATUS (1 << 29)
668#define HDMIC_HOTPLUG_INT_STATUS (1 << 28) 679#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
680#define DPC_HOTPLUG_INT_STATUS (1 << 28)
669#define HDMID_HOTPLUG_INT_STATUS (1 << 27) 681#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
682#define DPD_HOTPLUG_INT_STATUS (1 << 27)
670#define CRT_HOTPLUG_INT_STATUS (1 << 11) 683#define CRT_HOTPLUG_INT_STATUS (1 << 11)
671#define TV_HOTPLUG_INT_STATUS (1 << 10) 684#define TV_HOTPLUG_INT_STATUS (1 << 10)
672#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 685#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -951,15 +964,15 @@
951# define DAC_A_1_3_V (0 << 4) 964# define DAC_A_1_3_V (0 << 4)
952# define DAC_A_1_1_V (1 << 4) 965# define DAC_A_1_1_V (1 << 4)
953# define DAC_A_0_7_V (2 << 4) 966# define DAC_A_0_7_V (2 << 4)
954# define DAC_A_OFF (3 << 4) 967# define DAC_A_MASK (3 << 4)
955# define DAC_B_1_3_V (0 << 2) 968# define DAC_B_1_3_V (0 << 2)
956# define DAC_B_1_1_V (1 << 2) 969# define DAC_B_1_1_V (1 << 2)
957# define DAC_B_0_7_V (2 << 2) 970# define DAC_B_0_7_V (2 << 2)
958# define DAC_B_OFF (3 << 2) 971# define DAC_B_MASK (3 << 2)
959# define DAC_C_1_3_V (0 << 0) 972# define DAC_C_1_3_V (0 << 0)
960# define DAC_C_1_1_V (1 << 0) 973# define DAC_C_1_1_V (1 << 0)
961# define DAC_C_0_7_V (2 << 0) 974# define DAC_C_0_7_V (2 << 0)
962# define DAC_C_OFF (3 << 0) 975# define DAC_C_MASK (3 << 0)
963 976
964/** 977/**
965 * CSC coefficients are stored in a floating point format with 9 bits of 978 * CSC coefficients are stored in a floating point format with 9 bits of
@@ -1328,6 +1341,163 @@
1328#define TV_V_CHROMA_0 0x68400 1341#define TV_V_CHROMA_0 0x68400
1329#define TV_V_CHROMA_42 0x684a8 1342#define TV_V_CHROMA_42 0x684a8
1330 1343
1344/* Display Port */
1345#define DP_B 0x64100
1346#define DP_C 0x64200
1347#define DP_D 0x64300
1348
1349#define DP_PORT_EN (1 << 31)
1350#define DP_PIPEB_SELECT (1 << 30)
1351
1352/* Link training mode - select a suitable mode for each stage */
1353#define DP_LINK_TRAIN_PAT_1 (0 << 28)
1354#define DP_LINK_TRAIN_PAT_2 (1 << 28)
1355#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
1356#define DP_LINK_TRAIN_OFF (3 << 28)
1357#define DP_LINK_TRAIN_MASK (3 << 28)
1358#define DP_LINK_TRAIN_SHIFT 28
1359
1360/* Signal voltages. These are mostly controlled by the other end */
1361#define DP_VOLTAGE_0_4 (0 << 25)
1362#define DP_VOLTAGE_0_6 (1 << 25)
1363#define DP_VOLTAGE_0_8 (2 << 25)
1364#define DP_VOLTAGE_1_2 (3 << 25)
1365#define DP_VOLTAGE_MASK (7 << 25)
1366#define DP_VOLTAGE_SHIFT 25
1367
1368/* Signal pre-emphasis levels, like voltages, the other end tells us what
1369 * they want
1370 */
1371#define DP_PRE_EMPHASIS_0 (0 << 22)
1372#define DP_PRE_EMPHASIS_3_5 (1 << 22)
1373#define DP_PRE_EMPHASIS_6 (2 << 22)
1374#define DP_PRE_EMPHASIS_9_5 (3 << 22)
1375#define DP_PRE_EMPHASIS_MASK (7 << 22)
1376#define DP_PRE_EMPHASIS_SHIFT 22
1377
1378/* How many wires to use. I guess 3 was too hard */
1379#define DP_PORT_WIDTH_1 (0 << 19)
1380#define DP_PORT_WIDTH_2 (1 << 19)
1381#define DP_PORT_WIDTH_4 (3 << 19)
1382#define DP_PORT_WIDTH_MASK (7 << 19)
1383
1384/* Mystic DPCD version 1.1 special mode */
1385#define DP_ENHANCED_FRAMING (1 << 18)
1386
1387/** locked once port is enabled */
1388#define DP_PORT_REVERSAL (1 << 15)
1389
1390/** sends the clock on lane 15 of the PEG for debug */
1391#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1392
1393#define DP_SCRAMBLING_DISABLE (1 << 12)
1394
1395/** limit RGB values to avoid confusing TVs */
1396#define DP_COLOR_RANGE_16_235 (1 << 8)
1397
1398/** Turn on the audio link */
1399#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
1400
1401/** vs and hs sync polarity */
1402#define DP_SYNC_VS_HIGH (1 << 4)
1403#define DP_SYNC_HS_HIGH (1 << 3)
1404
1405/** A fantasy */
1406#define DP_DETECTED (1 << 2)
1407
1408/** The aux channel provides a way to talk to the
1409 * signal sink for DDC etc. Max packet size supported
1410 * is 20 bytes in each direction, hence the 5 fixed
1411 * data registers
1412 */
1413#define DPB_AUX_CH_CTL 0x64110
1414#define DPB_AUX_CH_DATA1 0x64114
1415#define DPB_AUX_CH_DATA2 0x64118
1416#define DPB_AUX_CH_DATA3 0x6411c
1417#define DPB_AUX_CH_DATA4 0x64120
1418#define DPB_AUX_CH_DATA5 0x64124
1419
1420#define DPC_AUX_CH_CTL 0x64210
1421#define DPC_AUX_CH_DATA1 0x64214
1422#define DPC_AUX_CH_DATA2 0x64218
1423#define DPC_AUX_CH_DATA3 0x6421c
1424#define DPC_AUX_CH_DATA4 0x64220
1425#define DPC_AUX_CH_DATA5 0x64224
1426
1427#define DPD_AUX_CH_CTL 0x64310
1428#define DPD_AUX_CH_DATA1 0x64314
1429#define DPD_AUX_CH_DATA2 0x64318
1430#define DPD_AUX_CH_DATA3 0x6431c
1431#define DPD_AUX_CH_DATA4 0x64320
1432#define DPD_AUX_CH_DATA5 0x64324
1433
1434#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
1435#define DP_AUX_CH_CTL_DONE (1 << 30)
1436#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
1437#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
1438#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
1439#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
1440#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
1441#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
1442#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
1443#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
1444#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
1445#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
1446#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
1447#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
1448#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
1449#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
1450#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
1451#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
1452#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
1453#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
1454#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
1455
1456/*
1457 * Computing GMCH M and N values for the Display Port link
1458 *
1459 * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
1460 *
1461 * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
1462 *
1463 * The GMCH value is used internally
1464 *
1465 * bytes_per_pixel is the number of bytes coming out of the plane,
1466 * which is after the LUTs, so we want the bytes for our color format.
1467 * For our current usage, this is always 3, one byte for R, G and B.
1468 */
1469#define PIPEA_GMCH_DATA_M 0x70050
1470#define PIPEB_GMCH_DATA_M 0x71050
1471
1472/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
1473#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
1474#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
1475
1476#define PIPE_GMCH_DATA_M_MASK (0xffffff)
1477
1478#define PIPEA_GMCH_DATA_N 0x70054
1479#define PIPEB_GMCH_DATA_N 0x71054
1480#define PIPE_GMCH_DATA_N_MASK (0xffffff)
1481
1482/*
1483 * Computing Link M and N values for the Display Port link
1484 *
1485 * Link M / N = pixel_clock / ls_clk
1486 *
1487 * (the DP spec calls pixel_clock the 'strm_clk')
1488 *
1489 * The Link value is transmitted in the Main Stream
1490 * Attributes and VB-ID.
1491 */
1492
1493#define PIPEA_DP_LINK_M 0x70060
1494#define PIPEB_DP_LINK_M 0x71060
1495#define PIPEA_DP_LINK_M_MASK (0xffffff)
1496
1497#define PIPEA_DP_LINK_N 0x70064
1498#define PIPEB_DP_LINK_N 0x71064
1499#define PIPEA_DP_LINK_N_MASK (0xffffff)
1500
1331/* Display & cursor control */ 1501/* Display & cursor control */
1332 1502
1333/* Pipe A */ 1503/* Pipe A */
@@ -1517,4 +1687,444 @@
1517# define VGA_2X_MODE (1 << 30) 1687# define VGA_2X_MODE (1 << 30)
1518# define VGA_PIPE_B_SELECT (1 << 29) 1688# define VGA_PIPE_B_SELECT (1 << 29)
1519 1689
1690/* IGDNG */
1691
1692#define CPU_VGACNTRL 0x41000
1693
1694#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
1695#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
1696#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
1697#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
1698#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
1699#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
1700#define DIGITAL_PORTA_NO_DETECT (0 << 0)
1701#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
1702#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
1703
1704/* refresh rate hardware control */
1705#define RR_HW_CTL 0x45300
1706#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
1707#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
1708
1709#define FDI_PLL_BIOS_0 0x46000
1710#define FDI_PLL_BIOS_1 0x46004
1711#define FDI_PLL_BIOS_2 0x46008
1712#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
1713#define DISPLAY_PORT_PLL_BIOS_1 0x46010
1714#define DISPLAY_PORT_PLL_BIOS_2 0x46014
1715
1716#define FDI_PLL_FREQ_CTL 0x46030
1717#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
1718#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
1719#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
1720
1721
1722#define PIPEA_DATA_M1 0x60030
1723#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
1724#define TU_SIZE_MASK 0x7e000000
1725#define PIPEA_DATA_M1_OFFSET 0
1726#define PIPEA_DATA_N1 0x60034
1727#define PIPEA_DATA_N1_OFFSET 0
1728
1729#define PIPEA_DATA_M2 0x60038
1730#define PIPEA_DATA_M2_OFFSET 0
1731#define PIPEA_DATA_N2 0x6003c
1732#define PIPEA_DATA_N2_OFFSET 0
1733
1734#define PIPEA_LINK_M1 0x60040
1735#define PIPEA_LINK_M1_OFFSET 0
1736#define PIPEA_LINK_N1 0x60044
1737#define PIPEA_LINK_N1_OFFSET 0
1738
1739#define PIPEA_LINK_M2 0x60048
1740#define PIPEA_LINK_M2_OFFSET 0
1741#define PIPEA_LINK_N2 0x6004c
1742#define PIPEA_LINK_N2_OFFSET 0
1743
1744/* PIPEB timing regs are same start from 0x61000 */
1745
1746#define PIPEB_DATA_M1 0x61030
1747#define PIPEB_DATA_M1_OFFSET 0
1748#define PIPEB_DATA_N1 0x61034
1749#define PIPEB_DATA_N1_OFFSET 0
1750
1751#define PIPEB_DATA_M2 0x61038
1752#define PIPEB_DATA_M2_OFFSET 0
1753#define PIPEB_DATA_N2 0x6103c
1754#define PIPEB_DATA_N2_OFFSET 0
1755
1756#define PIPEB_LINK_M1 0x61040
1757#define PIPEB_LINK_M1_OFFSET 0
1758#define PIPEB_LINK_N1 0x61044
1759#define PIPEB_LINK_N1_OFFSET 0
1760
1761#define PIPEB_LINK_M2 0x61048
1762#define PIPEB_LINK_M2_OFFSET 0
1763#define PIPEB_LINK_N2 0x6104c
1764#define PIPEB_LINK_N2_OFFSET 0
1765
1766/* CPU panel fitter */
1767#define PFA_CTL_1 0x68080
1768#define PFB_CTL_1 0x68880
1769#define PF_ENABLE (1<<31)
1770
1771/* legacy palette */
1772#define LGC_PALETTE_A 0x4a000
1773#define LGC_PALETTE_B 0x4a800
1774
1775/* interrupts */
1776#define DE_MASTER_IRQ_CONTROL (1 << 31)
1777#define DE_SPRITEB_FLIP_DONE (1 << 29)
1778#define DE_SPRITEA_FLIP_DONE (1 << 28)
1779#define DE_PLANEB_FLIP_DONE (1 << 27)
1780#define DE_PLANEA_FLIP_DONE (1 << 26)
1781#define DE_PCU_EVENT (1 << 25)
1782#define DE_GTT_FAULT (1 << 24)
1783#define DE_POISON (1 << 23)
1784#define DE_PERFORM_COUNTER (1 << 22)
1785#define DE_PCH_EVENT (1 << 21)
1786#define DE_AUX_CHANNEL_A (1 << 20)
1787#define DE_DP_A_HOTPLUG (1 << 19)
1788#define DE_GSE (1 << 18)
1789#define DE_PIPEB_VBLANK (1 << 15)
1790#define DE_PIPEB_EVEN_FIELD (1 << 14)
1791#define DE_PIPEB_ODD_FIELD (1 << 13)
1792#define DE_PIPEB_LINE_COMPARE (1 << 12)
1793#define DE_PIPEB_VSYNC (1 << 11)
1794#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
1795#define DE_PIPEA_VBLANK (1 << 7)
1796#define DE_PIPEA_EVEN_FIELD (1 << 6)
1797#define DE_PIPEA_ODD_FIELD (1 << 5)
1798#define DE_PIPEA_LINE_COMPARE (1 << 4)
1799#define DE_PIPEA_VSYNC (1 << 3)
1800#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
1801
1802#define DEISR 0x44000
1803#define DEIMR 0x44004
1804#define DEIIR 0x44008
1805#define DEIER 0x4400c
1806
1807/* GT interrupt */
1808#define GT_SYNC_STATUS (1 << 2)
1809#define GT_USER_INTERRUPT (1 << 0)
1810
1811#define GTISR 0x44010
1812#define GTIMR 0x44014
1813#define GTIIR 0x44018
1814#define GTIER 0x4401c
1815
1816/* PCH */
1817
1818/* south display engine interrupt */
1819#define SDE_CRT_HOTPLUG (1 << 11)
1820#define SDE_PORTD_HOTPLUG (1 << 10)
1821#define SDE_PORTC_HOTPLUG (1 << 9)
1822#define SDE_PORTB_HOTPLUG (1 << 8)
1823#define SDE_SDVOB_HOTPLUG (1 << 6)
1824
1825#define SDEISR 0xc4000
1826#define SDEIMR 0xc4004
1827#define SDEIIR 0xc4008
1828#define SDEIER 0xc400c
1829
1830/* digital port hotplug */
1831#define PCH_PORT_HOTPLUG 0xc4030
1832#define PORTD_HOTPLUG_ENABLE (1 << 20)
1833#define PORTD_PULSE_DURATION_2ms (0)
1834#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
1835#define PORTD_PULSE_DURATION_6ms (2 << 18)
1836#define PORTD_PULSE_DURATION_100ms (3 << 18)
1837#define PORTD_HOTPLUG_NO_DETECT (0)
1838#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
1839#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
1840#define PORTC_HOTPLUG_ENABLE (1 << 12)
1841#define PORTC_PULSE_DURATION_2ms (0)
1842#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
1843#define PORTC_PULSE_DURATION_6ms (2 << 10)
1844#define PORTC_PULSE_DURATION_100ms (3 << 10)
1845#define PORTC_HOTPLUG_NO_DETECT (0)
1846#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
1847#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
1848#define PORTB_HOTPLUG_ENABLE (1 << 4)
1849#define PORTB_PULSE_DURATION_2ms (0)
1850#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
1851#define PORTB_PULSE_DURATION_6ms (2 << 2)
1852#define PORTB_PULSE_DURATION_100ms (3 << 2)
1853#define PORTB_HOTPLUG_NO_DETECT (0)
1854#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
1855#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
1856
1857#define PCH_GPIOA 0xc5010
1858#define PCH_GPIOB 0xc5014
1859#define PCH_GPIOC 0xc5018
1860#define PCH_GPIOD 0xc501c
1861#define PCH_GPIOE 0xc5020
1862#define PCH_GPIOF 0xc5024
1863
1864#define PCH_DPLL_A 0xc6014
1865#define PCH_DPLL_B 0xc6018
1866
1867#define PCH_FPA0 0xc6040
1868#define PCH_FPA1 0xc6044
1869#define PCH_FPB0 0xc6048
1870#define PCH_FPB1 0xc604c
1871
1872#define PCH_DPLL_TEST 0xc606c
1873
1874#define PCH_DREF_CONTROL 0xC6200
1875#define DREF_CONTROL_MASK 0x7fc3
1876#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
1877#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
1878#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
1879#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
1880#define DREF_SSC_SOURCE_DISABLE (0<<11)
1881#define DREF_SSC_SOURCE_ENABLE (2<<11)
1882#define DREF_SSC_SOURCE_MASK (2<<11)
1883#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
1884#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
1885#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
1886#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
1887#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
1888#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
1889#define DREF_SSC4_DOWNSPREAD (0<<6)
1890#define DREF_SSC4_CENTERSPREAD (1<<6)
1891#define DREF_SSC1_DISABLE (0<<1)
1892#define DREF_SSC1_ENABLE (1<<1)
1893#define DREF_SSC4_DISABLE (0)
1894#define DREF_SSC4_ENABLE (1)
1895
1896#define PCH_RAWCLK_FREQ 0xc6204
1897#define FDL_TP1_TIMER_SHIFT 12
1898#define FDL_TP1_TIMER_MASK (3<<12)
1899#define FDL_TP2_TIMER_SHIFT 10
1900#define FDL_TP2_TIMER_MASK (3<<10)
1901#define RAWCLK_FREQ_MASK 0x3ff
1902
1903#define PCH_DPLL_TMR_CFG 0xc6208
1904
1905#define PCH_SSC4_PARMS 0xc6210
1906#define PCH_SSC4_AUX_PARMS 0xc6214
1907
1908/* transcoder */
1909
1910#define TRANS_HTOTAL_A 0xe0000
1911#define TRANS_HTOTAL_SHIFT 16
1912#define TRANS_HACTIVE_SHIFT 0
1913#define TRANS_HBLANK_A 0xe0004
1914#define TRANS_HBLANK_END_SHIFT 16
1915#define TRANS_HBLANK_START_SHIFT 0
1916#define TRANS_HSYNC_A 0xe0008
1917#define TRANS_HSYNC_END_SHIFT 16
1918#define TRANS_HSYNC_START_SHIFT 0
1919#define TRANS_VTOTAL_A 0xe000c
1920#define TRANS_VTOTAL_SHIFT 16
1921#define TRANS_VACTIVE_SHIFT 0
1922#define TRANS_VBLANK_A 0xe0010
1923#define TRANS_VBLANK_END_SHIFT 16
1924#define TRANS_VBLANK_START_SHIFT 0
1925#define TRANS_VSYNC_A 0xe0014
1926#define TRANS_VSYNC_END_SHIFT 16
1927#define TRANS_VSYNC_START_SHIFT 0
1928
1929#define TRANSA_DATA_M1 0xe0030
1930#define TRANSA_DATA_N1 0xe0034
1931#define TRANSA_DATA_M2 0xe0038
1932#define TRANSA_DATA_N2 0xe003c
1933#define TRANSA_DP_LINK_M1 0xe0040
1934#define TRANSA_DP_LINK_N1 0xe0044
1935#define TRANSA_DP_LINK_M2 0xe0048
1936#define TRANSA_DP_LINK_N2 0xe004c
1937
1938#define TRANS_HTOTAL_B 0xe1000
1939#define TRANS_HBLANK_B 0xe1004
1940#define TRANS_HSYNC_B 0xe1008
1941#define TRANS_VTOTAL_B 0xe100c
1942#define TRANS_VBLANK_B 0xe1010
1943#define TRANS_VSYNC_B 0xe1014
1944
1945#define TRANSB_DATA_M1 0xe1030
1946#define TRANSB_DATA_N1 0xe1034
1947#define TRANSB_DATA_M2 0xe1038
1948#define TRANSB_DATA_N2 0xe103c
1949#define TRANSB_DP_LINK_M1 0xe1040
1950#define TRANSB_DP_LINK_N1 0xe1044
1951#define TRANSB_DP_LINK_M2 0xe1048
1952#define TRANSB_DP_LINK_N2 0xe104c
1953
1954#define TRANSACONF 0xf0008
1955#define TRANSBCONF 0xf1008
1956#define TRANS_DISABLE (0<<31)
1957#define TRANS_ENABLE (1<<31)
1958#define TRANS_STATE_MASK (1<<30)
1959#define TRANS_STATE_DISABLE (0<<30)
1960#define TRANS_STATE_ENABLE (1<<30)
1961#define TRANS_FSYNC_DELAY_HB1 (0<<27)
1962#define TRANS_FSYNC_DELAY_HB2 (1<<27)
1963#define TRANS_FSYNC_DELAY_HB3 (2<<27)
1964#define TRANS_FSYNC_DELAY_HB4 (3<<27)
1965#define TRANS_DP_AUDIO_ONLY (1<<26)
1966#define TRANS_DP_VIDEO_AUDIO (0<<26)
1967#define TRANS_PROGRESSIVE (0<<21)
1968#define TRANS_8BPC (0<<5)
1969#define TRANS_10BPC (1<<5)
1970#define TRANS_6BPC (2<<5)
1971#define TRANS_12BPC (3<<5)
1972
1973#define FDI_RXA_CHICKEN 0xc200c
1974#define FDI_RXB_CHICKEN 0xc2010
1975#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
1976
1977/* CPU: FDI_TX */
1978#define FDI_TXA_CTL 0x60100
1979#define FDI_TXB_CTL 0x61100
1980#define FDI_TX_DISABLE (0<<31)
1981#define FDI_TX_ENABLE (1<<31)
1982#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
1983#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
1984#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
1985#define FDI_LINK_TRAIN_NONE (3<<28)
1986#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
1987#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
1988#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
1989#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
1990#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
1991#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
1992#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
1993#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
1994#define FDI_DP_PORT_WIDTH_X1 (0<<19)
1995#define FDI_DP_PORT_WIDTH_X2 (1<<19)
1996#define FDI_DP_PORT_WIDTH_X3 (2<<19)
1997#define FDI_DP_PORT_WIDTH_X4 (3<<19)
1998#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
1999/* IGDNG: hardwired to 1 */
2000#define FDI_TX_PLL_ENABLE (1<<14)
2001/* both Tx and Rx */
2002#define FDI_SCRAMBLING_ENABLE (0<<7)
2003#define FDI_SCRAMBLING_DISABLE (1<<7)
2004
2005/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
2006#define FDI_RXA_CTL 0xf000c
2007#define FDI_RXB_CTL 0xf100c
2008#define FDI_RX_ENABLE (1<<31)
2009#define FDI_RX_DISABLE (0<<31)
2010/* train, dp width same as FDI_TX */
2011#define FDI_DP_PORT_WIDTH_X8 (7<<19)
2012#define FDI_8BPC (0<<16)
2013#define FDI_10BPC (1<<16)
2014#define FDI_6BPC (2<<16)
2015#define FDI_12BPC (3<<16)
2016#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
2017#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
2018#define FDI_RX_PLL_ENABLE (1<<13)
2019#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
2020#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
2021#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
2022#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
2023#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2024#define FDI_SEL_RAWCLK (0<<4)
2025#define FDI_SEL_PCDCLK (1<<4)
2026
2027#define FDI_RXA_MISC 0xf0010
2028#define FDI_RXB_MISC 0xf1010
2029#define FDI_RXA_TUSIZE1 0xf0030
2030#define FDI_RXA_TUSIZE2 0xf0038
2031#define FDI_RXB_TUSIZE1 0xf1030
2032#define FDI_RXB_TUSIZE2 0xf1038
2033
2034/* FDI_RX interrupt register format */
2035#define FDI_RX_INTER_LANE_ALIGN (1<<10)
2036#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
2037#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
2038#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
2039#define FDI_RX_FS_CODE_ERR (1<<6)
2040#define FDI_RX_FE_CODE_ERR (1<<5)
2041#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
2042#define FDI_RX_HDCP_LINK_FAIL (1<<3)
2043#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
2044#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
2045#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
2046
2047#define FDI_RXA_IIR 0xf0014
2048#define FDI_RXA_IMR 0xf0018
2049#define FDI_RXB_IIR 0xf1014
2050#define FDI_RXB_IMR 0xf1018
2051
2052#define FDI_PLL_CTL_1 0xfe000
2053#define FDI_PLL_CTL_2 0xfe004
2054
2055/* CRT */
2056#define PCH_ADPA 0xe1100
2057#define ADPA_TRANS_SELECT_MASK (1<<30)
2058#define ADPA_TRANS_A_SELECT 0
2059#define ADPA_TRANS_B_SELECT (1<<30)
2060#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
2061#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
2062#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
2063#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
2064#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
2065#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
2066#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
2067#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
2068#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
2069#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
2070#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
2071#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
2072#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
2073#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
2074#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
2075#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
2076#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
2077#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
2078#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
2079
2080/* or SDVOB */
2081#define HDMIB 0xe1140
2082#define PORT_ENABLE (1 << 31)
2083#define TRANSCODER_A (0)
2084#define TRANSCODER_B (1 << 30)
2085#define COLOR_FORMAT_8bpc (0)
2086#define COLOR_FORMAT_12bpc (3 << 26)
2087#define SDVOB_HOTPLUG_ENABLE (1 << 23)
2088#define SDVO_ENCODING (0)
2089#define TMDS_ENCODING (2 << 10)
2090#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
2091#define SDVOB_BORDER_ENABLE (1 << 7)
2092#define AUDIO_ENABLE (1 << 6)
2093#define VSYNC_ACTIVE_HIGH (1 << 4)
2094#define HSYNC_ACTIVE_HIGH (1 << 3)
2095#define PORT_DETECTED (1 << 2)
2096
2097#define HDMIC 0xe1150
2098#define HDMID 0xe1160
2099
2100#define PCH_LVDS 0xe1180
2101#define LVDS_DETECTED (1 << 1)
2102
2103#define BLC_PWM_CPU_CTL2 0x48250
2104#define PWM_ENABLE (1 << 31)
2105#define PWM_PIPE_A (0 << 29)
2106#define PWM_PIPE_B (1 << 29)
2107#define BLC_PWM_CPU_CTL 0x48254
2108
2109#define BLC_PWM_PCH_CTL1 0xc8250
2110#define PWM_PCH_ENABLE (1 << 31)
2111#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
2112#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
2113#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
2114#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
2115
2116#define BLC_PWM_PCH_CTL2 0xc8254
2117
2118#define PCH_PP_STATUS 0xc7200
2119#define PCH_PP_CONTROL 0xc7204
2120#define EDP_FORCE_VDD (1 << 3)
2121#define EDP_BLC_ENABLE (1 << 2)
2122#define PANEL_POWER_RESET (1 << 1)
2123#define PANEL_POWER_OFF (0 << 0)
2124#define PANEL_POWER_ON (1 << 0)
2125#define PCH_PP_ON_DELAYS 0xc7208
2126#define EDP_PANEL (1 << 30)
2127#define PCH_PP_OFF_DELAYS 0xc720c
2128#define PCH_PP_DIVISOR 0xc7210
2129
1520#endif /* _I915_REG_H_ */ 2130#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ce8a21344a71..a98e2831ed31 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev)
295 i915_save_palette(dev, PIPE_B); 295 i915_save_palette(dev, PIPE_B);
296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
297 297
298 /* Cursor state */
299 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
300 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
301 dev_priv->saveCURABASE = I915_READ(CURABASE);
302 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
303 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
304 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
305 if (!IS_I9XX(dev))
306 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
307
298 /* CRT state */ 308 /* CRT state */
299 dev_priv->saveADPA = I915_READ(ADPA); 309 dev_priv->saveADPA = I915_READ(ADPA);
300 310
@@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev)
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 490 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); 491 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
482 492
493 /* Cursor state */
494 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
495 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
496 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
497 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
498 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
499 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
500 if (!IS_I9XX(dev))
501 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
502
483 /* CRT state */ 503 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA); 504 I915_WRITE(ADPA, dev_priv->saveADPA);
485 505
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 9d78cff33b24..754dd22fdd77 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -30,6 +30,8 @@
30#include "i915_drv.h" 30#include "i915_drv.h"
31#include "intel_bios.h" 31#include "intel_bios.h"
32 32
33#define SLAVE_ADDR1 0x70
34#define SLAVE_ADDR2 0x72
33 35
34static void * 36static void *
35find_section(struct bdb_header *bdb, int section_id) 37find_section(struct bdb_header *bdb, int section_id)
@@ -193,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv,
193 } 195 }
194} 196}
195 197
198static void
199parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
200 struct bdb_header *bdb)
201{
202 struct sdvo_device_mapping *p_mapping;
203 struct bdb_general_definitions *p_defs;
204 struct child_device_config *p_child;
205 int i, child_device_num, count;
206 u16 block_size, *block_ptr;
207
208 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
209 if (!p_defs) {
210 DRM_DEBUG("No general definition block is found\n");
211 return;
212 }
213 /* judge whether the size of child device meets the requirements.
214 * If the child device size obtained from general definition block
215 * is different with sizeof(struct child_device_config), skip the
216 * parsing of sdvo device info
217 */
218 if (p_defs->child_dev_size != sizeof(*p_child)) {
219 /* different child dev size . Ignore it */
220 DRM_DEBUG("different child size is found. Invalid.\n");
221 return;
222 }
223 /* get the block size of general definitions */
224 block_ptr = (u16 *)((char *)p_defs - 2);
225 block_size = *block_ptr;
226 /* get the number of child device */
227 child_device_num = (block_size - sizeof(*p_defs)) /
228 sizeof(*p_child);
229 count = 0;
230 for (i = 0; i < child_device_num; i++) {
231 p_child = &(p_defs->devices[i]);
232 if (!p_child->device_type) {
233 /* skip the device block if device type is invalid */
234 continue;
235 }
236 if (p_child->slave_addr != SLAVE_ADDR1 &&
237 p_child->slave_addr != SLAVE_ADDR2) {
238 /*
239 * If the slave address is neither 0x70 nor 0x72,
240 * it is not a SDVO device. Skip it.
241 */
242 continue;
243 }
244 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
245 p_child->dvo_port != DEVICE_PORT_DVOC) {
246 /* skip the incorrect SDVO port */
247 DRM_DEBUG("Incorrect SDVO port. Skip it \n");
248 continue;
249 }
250 DRM_DEBUG("the SDVO device with slave addr %2x is found on "
251 "%s port\n",
252 p_child->slave_addr,
253 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
254 "SDVOB" : "SDVOC");
255 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
256 if (!p_mapping->initialized) {
257 p_mapping->dvo_port = p_child->dvo_port;
258 p_mapping->slave_addr = p_child->slave_addr;
259 p_mapping->dvo_wiring = p_child->dvo_wiring;
260 p_mapping->initialized = 1;
261 } else {
262 DRM_DEBUG("Maybe one SDVO port is shared by "
263 "two SDVO device.\n");
264 }
265 if (p_child->slave2_addr) {
266 /* Maybe this is a SDVO device with multiple inputs */
267 /* And the mapping info is not added */
268 DRM_DEBUG("there exists the slave2_addr. Maybe this "
269 "is a SDVO device with multiple inputs.\n");
270 }
271 count++;
272 }
273
274 if (!count) {
275 /* No SDVO device info is found */
276 DRM_DEBUG("No SDVO device info is found in VBT\n");
277 }
278 return;
279}
196/** 280/**
197 * intel_init_bios - initialize VBIOS settings & find VBT 281 * intel_init_bios - initialize VBIOS settings & find VBT
198 * @dev: DRM device 282 * @dev: DRM device
@@ -242,7 +326,7 @@ intel_init_bios(struct drm_device *dev)
242 parse_general_features(dev_priv, bdb); 326 parse_general_features(dev_priv, bdb);
243 parse_lfp_panel_data(dev_priv, bdb); 327 parse_lfp_panel_data(dev_priv, bdb);
244 parse_sdvo_panel_data(dev_priv, bdb); 328 parse_sdvo_panel_data(dev_priv, bdb);
245 329 parse_sdvo_device_mapping(dev_priv, bdb);
246 pci_unmap_rom(pdev, bios); 330 pci_unmap_rom(pdev, bios);
247 331
248 return 0; 332 return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8ca2cde15804..fe72e1c225d8 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -135,6 +135,86 @@ struct bdb_general_features {
135 u8 rsvd11:6; /* finish byte */ 135 u8 rsvd11:6; /* finish byte */
136} __attribute__((packed)); 136} __attribute__((packed));
137 137
138/* pre-915 */
139#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
140#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
141#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
142#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
143
144/* Pre 915 */
145#define DEVICE_TYPE_NONE 0x00
146#define DEVICE_TYPE_CRT 0x01
147#define DEVICE_TYPE_TV 0x09
148#define DEVICE_TYPE_EFP 0x12
149#define DEVICE_TYPE_LFP 0x22
150/* On 915+ */
151#define DEVICE_TYPE_CRT_DPMS 0x6001
152#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
153#define DEVICE_TYPE_TV_COMPOSITE 0x0209
154#define DEVICE_TYPE_TV_MACROVISION 0x0289
155#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
156#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
157#define DEVICE_TYPE_TV_SCART 0x0209
158#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
159#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
160#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
161#define DEVICE_TYPE_EFP_DVI_I 0x6053
162#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
163#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
164#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
165#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
166#define DEVICE_TYPE_LFP_PANELLINK 0x5012
167#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
168#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
169#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
170#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
171
172#define DEVICE_CFG_NONE 0x00
173#define DEVICE_CFG_12BIT_DVOB 0x01
174#define DEVICE_CFG_12BIT_DVOC 0x02
175#define DEVICE_CFG_24BIT_DVOBC 0x09
176#define DEVICE_CFG_24BIT_DVOCB 0x0a
177#define DEVICE_CFG_DUAL_DVOB 0x11
178#define DEVICE_CFG_DUAL_DVOC 0x12
179#define DEVICE_CFG_DUAL_DVOBC 0x13
180#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
181#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
182
183#define DEVICE_WIRE_NONE 0x00
184#define DEVICE_WIRE_DVOB 0x01
185#define DEVICE_WIRE_DVOC 0x02
186#define DEVICE_WIRE_DVOBC 0x03
187#define DEVICE_WIRE_DVOBB 0x05
188#define DEVICE_WIRE_DVOCC 0x06
189#define DEVICE_WIRE_DVOB_MASTER 0x0d
190#define DEVICE_WIRE_DVOC_MASTER 0x0e
191
192#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
193#define DEVICE_PORT_DVOB 0x01
194#define DEVICE_PORT_DVOC 0x02
195
196struct child_device_config {
197 u16 handle;
198 u16 device_type;
199 u8 device_id[10]; /* See DEVICE_TYPE_* above */
200 u16 addin_offset;
201 u8 dvo_port; /* See Device_PORT_* above */
202 u8 i2c_pin;
203 u8 slave_addr;
204 u8 ddc_pin;
205 u16 edid_ptr;
206 u8 dvo_cfg; /* See DEVICE_CFG_* above */
207 u8 dvo2_port;
208 u8 i2c2_pin;
209 u8 slave2_addr;
210 u8 ddc2_pin;
211 u8 capabilities;
212 u8 dvo_wiring;/* See DEVICE_WIRE_* above */
213 u8 dvo2_wiring;
214 u16 extended_type;
215 u8 dvo_function;
216} __attribute__((packed));
217
138struct bdb_general_definitions { 218struct bdb_general_definitions {
139 /* DDC GPIO */ 219 /* DDC GPIO */
140 u8 crt_ddc_gmbus_pin; 220 u8 crt_ddc_gmbus_pin;
@@ -149,14 +229,19 @@ struct bdb_general_definitions {
149 u8 boot_display[2]; 229 u8 boot_display[2];
150 u8 child_dev_size; 230 u8 child_dev_size;
151 231
152 /* device info */ 232 /*
153 u8 tv_or_lvds_info[33]; 233 * Device info:
154 u8 dev1[33]; 234 * If TV is present, it'll be at devices[0].
155 u8 dev2[33]; 235 * LVDS will be next, either devices[0] or [1], if present.
156 u8 dev3[33]; 236 * On some platforms the number of device is 6. But could be as few as
157 u8 dev4[33]; 237 * 4 if both TV and LVDS are missing.
158 /* may be another device block here on some platforms */ 238 * And the device num is related with the size of general definition
159}; 239 * block. It is obtained by using the following formula:
240 * number = (block_size - sizeof(bdb_general_definitions))/
241 * sizeof(child_device_config);
242 */
243 struct child_device_config devices[0];
244} __attribute__((packed));
160 245
161struct bdb_lvds_options { 246struct bdb_lvds_options {
162 u8 panel_type; 247 u8 panel_type;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79acc4f4c1f8..6de97fc66029 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -37,9 +37,14 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
37{ 37{
38 struct drm_device *dev = encoder->dev; 38 struct drm_device *dev = encoder->dev;
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp; 40 u32 temp, reg;
41 41
42 temp = I915_READ(ADPA); 42 if (IS_IGDNG(dev))
43 reg = PCH_ADPA;
44 else
45 reg = ADPA;
46
47 temp = I915_READ(reg);
43 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 48 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
44 temp |= ADPA_DAC_ENABLE; 49 temp |= ADPA_DAC_ENABLE;
45 50
@@ -58,7 +63,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
58 break; 63 break;
59 } 64 }
60 65
61 I915_WRITE(ADPA, temp); 66 I915_WRITE(reg, temp);
62} 67}
63 68
64static int intel_crt_mode_valid(struct drm_connector *connector, 69static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -101,17 +106,23 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
101 struct drm_i915_private *dev_priv = dev->dev_private; 106 struct drm_i915_private *dev_priv = dev->dev_private;
102 int dpll_md_reg; 107 int dpll_md_reg;
103 u32 adpa, dpll_md; 108 u32 adpa, dpll_md;
109 u32 adpa_reg;
104 110
105 if (intel_crtc->pipe == 0) 111 if (intel_crtc->pipe == 0)
106 dpll_md_reg = DPLL_A_MD; 112 dpll_md_reg = DPLL_A_MD;
107 else 113 else
108 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
109 115
116 if (IS_IGDNG(dev))
117 adpa_reg = PCH_ADPA;
118 else
119 adpa_reg = ADPA;
120
110 /* 121 /*
111 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
112 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
113 */ 124 */
114 if (IS_I965G(dev)) { 125 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
115 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
116 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
117 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -125,13 +136,53 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
125 136
126 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
127 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
128 I915_WRITE(BCLRPAT_A, 0); 139 if (!IS_IGDNG(dev))
140 I915_WRITE(BCLRPAT_A, 0);
129 } else { 141 } else {
130 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0); 143 if (!IS_IGDNG(dev))
144 I915_WRITE(BCLRPAT_B, 0);
132 } 145 }
133 146
134 I915_WRITE(ADPA, adpa); 147 I915_WRITE(adpa_reg, adpa);
148}
149
150static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
151{
152 struct drm_device *dev = connector->dev;
153 struct drm_i915_private *dev_priv = dev->dev_private;
154 u32 adpa, temp;
155 bool ret;
156
157 temp = adpa = I915_READ(PCH_ADPA);
158
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
162 ADPA_CRT_HOTPLUG_WARMUP_10MS |
163 ADPA_CRT_HOTPLUG_SAMPLE_4S |
164 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
165 ADPA_CRT_HOTPLUG_VOLREF_325MV |
166 ADPA_CRT_HOTPLUG_ENABLE |
167 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
168
169 DRM_DEBUG("pch crt adpa 0x%x", adpa);
170 I915_WRITE(PCH_ADPA, adpa);
171
172 /* This might not be needed as not specified in spec...*/
173 udelay(1000);
174
175 /* Check the status to see if both blue and green are on now */
176 adpa = I915_READ(PCH_ADPA);
177 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) ==
178 ADPA_CRT_HOTPLUG_MONITOR_COLOR)
179 ret = true;
180 else
181 ret = false;
182
183 /* restore origin register */
184 I915_WRITE(PCH_ADPA, temp);
185 return ret;
135} 186}
136 187
137/** 188/**
@@ -148,6 +199,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
148 struct drm_i915_private *dev_priv = dev->dev_private; 199 struct drm_i915_private *dev_priv = dev->dev_private;
149 u32 hotplug_en; 200 u32 hotplug_en;
150 int i, tries = 0; 201 int i, tries = 0;
202
203 if (IS_IGDNG(dev))
204 return intel_igdng_crt_detect_hotplug(connector);
205
151 /* 206 /*
152 * On 4 series desktop, CRT detect sequence need to be done twice 207 * On 4 series desktop, CRT detect sequence need to be done twice
153 * to get a reliable result. 208 * to get a reliable result.
@@ -423,6 +478,7 @@ void intel_crt_init(struct drm_device *dev)
423{ 478{
424 struct drm_connector *connector; 479 struct drm_connector *connector;
425 struct intel_output *intel_output; 480 struct intel_output *intel_output;
481 u32 i2c_reg;
426 482
427 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 483 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
428 if (!intel_output) 484 if (!intel_output)
@@ -439,7 +495,11 @@ void intel_crt_init(struct drm_device *dev)
439 &intel_output->enc); 495 &intel_output->enc);
440 496
441 /* Set up the DDC bus. */ 497 /* Set up the DDC bus. */
442 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); 498 if (IS_IGDNG(dev))
499 i2c_reg = PCH_GPIOA;
500 else
501 i2c_reg = GPIOA;
502 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
443 if (!intel_output->ddc_bus) { 503 if (!intel_output->ddc_bus) {
444 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 504 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
445 "failed.\n"); 505 "failed.\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c9d6f10ba92e..028f5b66e3d8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -137,6 +137,8 @@ struct intel_limit {
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8 138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9 139#define INTEL_LIMIT_IGD_LVDS 9
140#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
141#define INTEL_LIMIT_IGDNG_LVDS 11
140 142
141/*The parameter is for SDVO on G4x platform*/ 143/*The parameter is for SDVO on G4x platform*/
142#define G4X_DOT_SDVO_MIN 25000 144#define G4X_DOT_SDVO_MIN 25000
@@ -216,12 +218,43 @@ struct intel_limit {
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 218#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 219#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218 220
221/* IGDNG */
222/* as we calculate clock using (register_value + 2) for
223 N/M1/M2, so here the range value for them is (actual_value-2).
224 */
225#define IGDNG_DOT_MIN 25000
226#define IGDNG_DOT_MAX 350000
227#define IGDNG_VCO_MIN 1760000
228#define IGDNG_VCO_MAX 3510000
229#define IGDNG_N_MIN 1
230#define IGDNG_N_MAX 5
231#define IGDNG_M_MIN 79
232#define IGDNG_M_MAX 118
233#define IGDNG_M1_MIN 12
234#define IGDNG_M1_MAX 23
235#define IGDNG_M2_MIN 5
236#define IGDNG_M2_MAX 9
237#define IGDNG_P_SDVO_DAC_MIN 5
238#define IGDNG_P_SDVO_DAC_MAX 80
239#define IGDNG_P_LVDS_MIN 28
240#define IGDNG_P_LVDS_MAX 112
241#define IGDNG_P1_MIN 1
242#define IGDNG_P1_MAX 8
243#define IGDNG_P2_SDVO_DAC_SLOW 10
244#define IGDNG_P2_SDVO_DAC_FAST 5
245#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
246#define IGDNG_P2_LVDS_FAST 7 /* double channel */
247#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */
248
219static bool 249static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 250intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock); 251 int target, int refclk, intel_clock_t *best_clock);
222static bool 252static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 253intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock); 254 int target, int refclk, intel_clock_t *best_clock);
255static bool
256intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 int target, int refclk, intel_clock_t *best_clock);
225 258
226static const intel_limit_t intel_limits[] = { 259static const intel_limit_t intel_limits[] = {
227 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 260 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -383,9 +416,47 @@ static const intel_limit_t intel_limits[] = {
383 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 416 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
384 .find_pll = intel_find_best_PLL, 417 .find_pll = intel_find_best_PLL,
385 }, 418 },
386 419 { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
420 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
421 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
422 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
423 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
424 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
425 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
426 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
427 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
428 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
429 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
430 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
431 .find_pll = intel_igdng_find_best_PLL,
432 },
433 { /* INTEL_LIMIT_IGDNG_LVDS */
434 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
435 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
436 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
437 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
438 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
439 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
440 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
441 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
442 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
443 .p2_slow = IGDNG_P2_LVDS_SLOW,
444 .p2_fast = IGDNG_P2_LVDS_FAST },
445 .find_pll = intel_igdng_find_best_PLL,
446 },
387}; 447};
388 448
449static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
450{
451 const intel_limit_t *limit;
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453 limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
454 else
455 limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
456
457 return limit;
458}
459
389static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 460static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
390{ 461{
391 struct drm_device *dev = crtc->dev; 462 struct drm_device *dev = crtc->dev;
@@ -418,7 +489,9 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
418 struct drm_device *dev = crtc->dev; 489 struct drm_device *dev = crtc->dev;
419 const intel_limit_t *limit; 490 const intel_limit_t *limit;
420 491
421 if (IS_G4X(dev)) { 492 if (IS_IGDNG(dev))
493 limit = intel_igdng_limit(crtc);
494 else if (IS_G4X(dev)) {
422 limit = intel_g4x_limit(crtc); 495 limit = intel_g4x_limit(crtc);
423 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 496 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
424 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
@@ -630,7 +703,64 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
630 } 703 }
631 } 704 }
632 } 705 }
706 return found;
707}
633 708
709static bool
710intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
711 int target, int refclk, intel_clock_t *best_clock)
712{
713 struct drm_device *dev = crtc->dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715 intel_clock_t clock;
716 int max_n;
717 bool found;
718 int err_most = 47;
719 found = false;
720
721 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
722 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
723 LVDS_CLKB_POWER_UP)
724 clock.p2 = limit->p2.p2_fast;
725 else
726 clock.p2 = limit->p2.p2_slow;
727 } else {
728 if (target < limit->p2.dot_limit)
729 clock.p2 = limit->p2.p2_slow;
730 else
731 clock.p2 = limit->p2.p2_fast;
732 }
733
734 memset(best_clock, 0, sizeof(*best_clock));
735 max_n = limit->n.max;
736 /* based on hardware requriment prefer smaller n to precision */
737 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
738 /* based on hardware requirment prefere larger m1,m2, p1 */
739 for (clock.m1 = limit->m1.max;
740 clock.m1 >= limit->m1.min; clock.m1--) {
741 for (clock.m2 = limit->m2.max;
742 clock.m2 >= limit->m2.min; clock.m2--) {
743 for (clock.p1 = limit->p1.max;
744 clock.p1 >= limit->p1.min; clock.p1--) {
745 int this_err;
746
747 intel_clock(dev, refclk, &clock);
748 if (!intel_PLL_is_valid(crtc, &clock))
749 continue;
750 this_err = abs((10000 - (target*10000/clock.dot)));
751 if (this_err < err_most) {
752 *best_clock = clock;
753 err_most = this_err;
754 max_n = clock.n;
755 found = true;
756 /* found on first matching */
757 goto out;
758 }
759 }
760 }
761 }
762 }
763out:
634 return found; 764 return found;
635} 765}
636 766
@@ -785,18 +915,292 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
785 return 0; 915 return 0;
786} 916}
787 917
918static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
919{
920 struct drm_device *dev = crtc->dev;
921 struct drm_i915_private *dev_priv = dev->dev_private;
922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
923 int pipe = intel_crtc->pipe;
924 int plane = intel_crtc->pipe;
925 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
926 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
927 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
928 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
929 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
930 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
931 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
932 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
933 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
934 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
935 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
936 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
937 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
938 int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
939 int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
940 int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
941 int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
942 int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
943 int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
944 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
945 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
946 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
947 u32 temp;
948 int tries = 5, j;
788 949
950 /* XXX: When our outputs are all unaware of DPMS modes other than off
951 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
952 */
953 switch (mode) {
954 case DRM_MODE_DPMS_ON:
955 case DRM_MODE_DPMS_STANDBY:
956 case DRM_MODE_DPMS_SUSPEND:
957 DRM_DEBUG("crtc %d dpms on\n", pipe);
958 /* enable PCH DPLL */
959 temp = I915_READ(pch_dpll_reg);
960 if ((temp & DPLL_VCO_ENABLE) == 0) {
961 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
962 I915_READ(pch_dpll_reg);
963 }
789 964
790/** 965 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
791 * Sets the power management mode of the pipe and plane. 966 temp = I915_READ(fdi_rx_reg);
792 * 967 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
793 * This code should probably grow support for turning the cursor off and back 968 FDI_SEL_PCDCLK |
794 * on appropriately at the same time as we're turning the pipe off/on. 969 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
795 */ 970 I915_READ(fdi_rx_reg);
796static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 971 udelay(200);
972
973 /* Enable CPU FDI TX PLL, always on for IGDNG */
974 temp = I915_READ(fdi_tx_reg);
975 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
976 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
977 I915_READ(fdi_tx_reg);
978 udelay(100);
979 }
980
981 /* Enable CPU pipe */
982 temp = I915_READ(pipeconf_reg);
983 if ((temp & PIPEACONF_ENABLE) == 0) {
984 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
985 I915_READ(pipeconf_reg);
986 udelay(100);
987 }
988
989 /* configure and enable CPU plane */
990 temp = I915_READ(dspcntr_reg);
991 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
992 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
993 /* Flush the plane changes */
994 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
995 }
996
997 /* enable CPU FDI TX and PCH FDI RX */
998 temp = I915_READ(fdi_tx_reg);
999 temp |= FDI_TX_ENABLE;
1000 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1001 temp &= ~FDI_LINK_TRAIN_NONE;
1002 temp |= FDI_LINK_TRAIN_PATTERN_1;
1003 I915_WRITE(fdi_tx_reg, temp);
1004 I915_READ(fdi_tx_reg);
1005
1006 temp = I915_READ(fdi_rx_reg);
1007 temp &= ~FDI_LINK_TRAIN_NONE;
1008 temp |= FDI_LINK_TRAIN_PATTERN_1;
1009 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1010 I915_READ(fdi_rx_reg);
1011
1012 udelay(150);
1013
1014 /* Train FDI. */
1015 /* umask FDI RX Interrupt symbol_lock and bit_lock bit
1016 for train result */
1017 temp = I915_READ(fdi_rx_imr_reg);
1018 temp &= ~FDI_RX_SYMBOL_LOCK;
1019 temp &= ~FDI_RX_BIT_LOCK;
1020 I915_WRITE(fdi_rx_imr_reg, temp);
1021 I915_READ(fdi_rx_imr_reg);
1022 udelay(150);
1023
1024 temp = I915_READ(fdi_rx_iir_reg);
1025 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1026
1027 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1028 for (j = 0; j < tries; j++) {
1029 temp = I915_READ(fdi_rx_iir_reg);
1030 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1031 if (temp & FDI_RX_BIT_LOCK)
1032 break;
1033 udelay(200);
1034 }
1035 if (j != tries)
1036 I915_WRITE(fdi_rx_iir_reg,
1037 temp | FDI_RX_BIT_LOCK);
1038 else
1039 DRM_DEBUG("train 1 fail\n");
1040 } else {
1041 I915_WRITE(fdi_rx_iir_reg,
1042 temp | FDI_RX_BIT_LOCK);
1043 DRM_DEBUG("train 1 ok 2!\n");
1044 }
1045 temp = I915_READ(fdi_tx_reg);
1046 temp &= ~FDI_LINK_TRAIN_NONE;
1047 temp |= FDI_LINK_TRAIN_PATTERN_2;
1048 I915_WRITE(fdi_tx_reg, temp);
1049
1050 temp = I915_READ(fdi_rx_reg);
1051 temp &= ~FDI_LINK_TRAIN_NONE;
1052 temp |= FDI_LINK_TRAIN_PATTERN_2;
1053 I915_WRITE(fdi_rx_reg, temp);
1054
1055 udelay(150);
1056
1057 temp = I915_READ(fdi_rx_iir_reg);
1058 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1059
1060 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1061 for (j = 0; j < tries; j++) {
1062 temp = I915_READ(fdi_rx_iir_reg);
1063 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1064 if (temp & FDI_RX_SYMBOL_LOCK)
1065 break;
1066 udelay(200);
1067 }
1068 if (j != tries) {
1069 I915_WRITE(fdi_rx_iir_reg,
1070 temp | FDI_RX_SYMBOL_LOCK);
1071 DRM_DEBUG("train 2 ok 1!\n");
1072 } else
1073 DRM_DEBUG("train 2 fail\n");
1074 } else {
1075 I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK);
1076 DRM_DEBUG("train 2 ok 2!\n");
1077 }
1078 DRM_DEBUG("train done\n");
1079
1080 /* set transcoder timing */
1081 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
1082 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1083 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1084
1085 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
1086 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1087 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1088
1089 /* enable PCH transcoder */
1090 temp = I915_READ(transconf_reg);
1091 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1092 I915_READ(transconf_reg);
1093
1094 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1095 ;
1096
1097 /* enable normal */
1098
1099 temp = I915_READ(fdi_tx_reg);
1100 temp &= ~FDI_LINK_TRAIN_NONE;
1101 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1102 FDI_TX_ENHANCE_FRAME_ENABLE);
1103 I915_READ(fdi_tx_reg);
1104
1105 temp = I915_READ(fdi_rx_reg);
1106 temp &= ~FDI_LINK_TRAIN_NONE;
1107 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1108 FDI_RX_ENHANCE_FRAME_ENABLE);
1109 I915_READ(fdi_rx_reg);
1110
1111 /* wait one idle pattern time */
1112 udelay(100);
1113
1114 intel_crtc_load_lut(crtc);
1115
1116 break;
1117 case DRM_MODE_DPMS_OFF:
1118 DRM_DEBUG("crtc %d dpms off\n", pipe);
1119
1120 /* Disable the VGA plane that we never use */
1121 I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE);
1122
1123 /* Disable display plane */
1124 temp = I915_READ(dspcntr_reg);
1125 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
1126 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
1127 /* Flush the plane changes */
1128 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1129 I915_READ(dspbase_reg);
1130 }
1131
1132 /* disable cpu pipe, disable after all planes disabled */
1133 temp = I915_READ(pipeconf_reg);
1134 if ((temp & PIPEACONF_ENABLE) != 0) {
1135 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
1136 I915_READ(pipeconf_reg);
1137 /* wait for cpu pipe off, pipe state */
1138 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0)
1139 ;
1140 } else
1141 DRM_DEBUG("crtc %d is disabled\n", pipe);
1142
1143 /* IGDNG-A : disable cpu panel fitter ? */
1144 temp = I915_READ(pf_ctl_reg);
1145 if ((temp & PF_ENABLE) != 0) {
1146 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1147 I915_READ(pf_ctl_reg);
1148 }
1149
1150 /* disable CPU FDI tx and PCH FDI rx */
1151 temp = I915_READ(fdi_tx_reg);
1152 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
1153 I915_READ(fdi_tx_reg);
1154
1155 temp = I915_READ(fdi_rx_reg);
1156 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1157 I915_READ(fdi_rx_reg);
1158
1159 /* still set train pattern 1 */
1160 temp = I915_READ(fdi_tx_reg);
1161 temp &= ~FDI_LINK_TRAIN_NONE;
1162 temp |= FDI_LINK_TRAIN_PATTERN_1;
1163 I915_WRITE(fdi_tx_reg, temp);
1164
1165 temp = I915_READ(fdi_rx_reg);
1166 temp &= ~FDI_LINK_TRAIN_NONE;
1167 temp |= FDI_LINK_TRAIN_PATTERN_1;
1168 I915_WRITE(fdi_rx_reg, temp);
1169
1170 /* disable PCH transcoder */
1171 temp = I915_READ(transconf_reg);
1172 if ((temp & TRANS_ENABLE) != 0) {
1173 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
1174 I915_READ(transconf_reg);
1175 /* wait for PCH transcoder off, transcoder state */
1176 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0)
1177 ;
1178 }
1179
1180 /* disable PCH DPLL */
1181 temp = I915_READ(pch_dpll_reg);
1182 if ((temp & DPLL_VCO_ENABLE) != 0) {
1183 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
1184 I915_READ(pch_dpll_reg);
1185 }
1186
1187 temp = I915_READ(fdi_rx_reg);
1188 if ((temp & FDI_RX_PLL_ENABLE) != 0) {
1189 temp &= ~FDI_SEL_PCDCLK;
1190 temp &= ~FDI_RX_PLL_ENABLE;
1191 I915_WRITE(fdi_rx_reg, temp);
1192 I915_READ(fdi_rx_reg);
1193 }
1194
1195 /* Wait for the clocks to turn off. */
1196 udelay(150);
1197 break;
1198 }
1199}
1200
1201static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
797{ 1202{
798 struct drm_device *dev = crtc->dev; 1203 struct drm_device *dev = crtc->dev;
799 struct drm_i915_master_private *master_priv;
800 struct drm_i915_private *dev_priv = dev->dev_private; 1204 struct drm_i915_private *dev_priv = dev->dev_private;
801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
802 int pipe = intel_crtc->pipe; 1206 int pipe = intel_crtc->pipe;
@@ -805,7 +1209,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
805 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1209 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
806 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1210 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
807 u32 temp; 1211 u32 temp;
808 bool enabled;
809 1212
810 /* XXX: When our outputs are all unaware of DPMS modes other than off 1213 /* XXX: When our outputs are all unaware of DPMS modes other than off
811 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1214 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -890,6 +1293,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
890 udelay(150); 1293 udelay(150);
891 break; 1294 break;
892 } 1295 }
1296}
1297
1298/**
1299 * Sets the power management mode of the pipe and plane.
1300 *
1301 * This code should probably grow support for turning the cursor off and back
1302 * on appropriately at the same time as we're turning the pipe off/on.
1303 */
1304static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
1305{
1306 struct drm_device *dev = crtc->dev;
1307 struct drm_i915_master_private *master_priv;
1308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1309 int pipe = intel_crtc->pipe;
1310 bool enabled;
1311
1312 if (IS_IGDNG(dev))
1313 igdng_crtc_dpms(crtc, mode);
1314 else
1315 i9xx_crtc_dpms(crtc, mode);
893 1316
894 if (!dev->primary->master) 1317 if (!dev->primary->master)
895 return; 1318 return;
@@ -947,6 +1370,12 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
947 struct drm_display_mode *mode, 1370 struct drm_display_mode *mode,
948 struct drm_display_mode *adjusted_mode) 1371 struct drm_display_mode *adjusted_mode)
949{ 1372{
1373 struct drm_device *dev = crtc->dev;
1374 if (IS_IGDNG(dev)) {
1375 /* FDI link clock is fixed at 2.7G */
1376 if (mode->clock * 3 > 27000 * 4)
1377 return MODE_CLOCK_HIGH;
1378 }
950 return true; 1379 return true;
951} 1380}
952 1381
@@ -1030,6 +1459,48 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
1030 return 1; 1459 return 1;
1031} 1460}
1032 1461
1462struct fdi_m_n {
1463 u32 tu;
1464 u32 gmch_m;
1465 u32 gmch_n;
1466 u32 link_m;
1467 u32 link_n;
1468};
1469
1470static void
1471fdi_reduce_ratio(u32 *num, u32 *den)
1472{
1473 while (*num > 0xffffff || *den > 0xffffff) {
1474 *num >>= 1;
1475 *den >>= 1;
1476 }
1477}
1478
1479#define DATA_N 0x800000
1480#define LINK_N 0x80000
1481
1482static void
1483igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1484 int pixel_clock, int link_clock,
1485 struct fdi_m_n *m_n)
1486{
1487 u64 temp;
1488
1489 m_n->tu = 64; /* default size */
1490
1491 temp = (u64) DATA_N * pixel_clock;
1492 temp = div_u64(temp, link_clock);
1493 m_n->gmch_m = (temp * bytes_per_pixel) / nlanes;
1494 m_n->gmch_n = DATA_N;
1495 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
1496
1497 temp = (u64) LINK_N * pixel_clock;
1498 m_n->link_m = div_u64(temp, link_clock);
1499 m_n->link_n = LINK_N;
1500 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
1501}
1502
1503
1033static int intel_crtc_mode_set(struct drm_crtc *crtc, 1504static int intel_crtc_mode_set(struct drm_crtc *crtc,
1034 struct drm_display_mode *mode, 1505 struct drm_display_mode *mode,
1035 struct drm_display_mode *adjusted_mode, 1506 struct drm_display_mode *adjusted_mode,
@@ -1063,6 +1534,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1063 struct drm_connector *connector; 1534 struct drm_connector *connector;
1064 const intel_limit_t *limit; 1535 const intel_limit_t *limit;
1065 int ret; 1536 int ret;
1537 struct fdi_m_n m_n = {0};
1538 int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
1539 int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
1540 int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
1541 int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
1542 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
1543 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
1544 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1545 int lvds_reg = LVDS;
1546 u32 temp;
1547 int sdvo_pixel_multiply;
1066 1548
1067 drm_vblank_pre_modeset(dev, pipe); 1549 drm_vblank_pre_modeset(dev, pipe);
1068 1550
@@ -1101,6 +1583,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1101 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 1583 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
1102 } else if (IS_I9XX(dev)) { 1584 } else if (IS_I9XX(dev)) {
1103 refclk = 96000; 1585 refclk = 96000;
1586 if (IS_IGDNG(dev))
1587 refclk = 120000; /* 120Mhz refclk */
1104 } else { 1588 } else {
1105 refclk = 48000; 1589 refclk = 48000;
1106 } 1590 }
@@ -1114,6 +1598,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1114 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 1598 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
1115 if (!ok) { 1599 if (!ok) {
1116 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1600 DRM_ERROR("Couldn't find PLL settings for mode!\n");
1601 drm_vblank_post_modeset(dev, pipe);
1117 return -EINVAL; 1602 return -EINVAL;
1118 } 1603 }
1119 1604
@@ -1137,12 +1622,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1137 } 1622 }
1138 } 1623 }
1139 1624
1625 /* FDI link */
1626 if (IS_IGDNG(dev))
1627 igdng_compute_m_n(3, 4, /* lane num 4 */
1628 adjusted_mode->clock,
1629 270000, /* lane clock */
1630 &m_n);
1631
1140 if (IS_IGD(dev)) 1632 if (IS_IGD(dev))
1141 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 1633 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1142 else 1634 else
1143 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1635 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
1144 1636
1145 dpll = DPLL_VGA_MODE_DIS; 1637 if (!IS_IGDNG(dev))
1638 dpll = DPLL_VGA_MODE_DIS;
1639
1146 if (IS_I9XX(dev)) { 1640 if (IS_I9XX(dev)) {
1147 if (is_lvds) 1641 if (is_lvds)
1148 dpll |= DPLLB_MODE_LVDS; 1642 dpll |= DPLLB_MODE_LVDS;
@@ -1150,17 +1644,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1150 dpll |= DPLLB_MODE_DAC_SERIAL; 1644 dpll |= DPLLB_MODE_DAC_SERIAL;
1151 if (is_sdvo) { 1645 if (is_sdvo) {
1152 dpll |= DPLL_DVO_HIGH_SPEED; 1646 dpll |= DPLL_DVO_HIGH_SPEED;
1153 if (IS_I945G(dev) || IS_I945GM(dev)) { 1647 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1154 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1648 if (IS_I945G(dev) || IS_I945GM(dev))
1155 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 1649 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
1156 } 1650 else if (IS_IGDNG(dev))
1651 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1157 } 1652 }
1158 1653
1159 /* compute bitmask from p1 value */ 1654 /* compute bitmask from p1 value */
1160 if (IS_IGD(dev)) 1655 if (IS_IGD(dev))
1161 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 1656 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1162 else 1657 else {
1163 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 1658 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1659 /* also FPA1 */
1660 if (IS_IGDNG(dev))
1661 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1662 }
1164 switch (clock.p2) { 1663 switch (clock.p2) {
1165 case 5: 1664 case 5:
1166 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1665 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1175,7 +1674,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1175 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 1674 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1176 break; 1675 break;
1177 } 1676 }
1178 if (IS_I965G(dev)) 1677 if (IS_I965G(dev) && !IS_IGDNG(dev))
1179 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 1678 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1180 } else { 1679 } else {
1181 if (is_lvds) { 1680 if (is_lvds) {
@@ -1207,10 +1706,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1207 /* Set up the display plane register */ 1706 /* Set up the display plane register */
1208 dspcntr = DISPPLANE_GAMMA_ENABLE; 1707 dspcntr = DISPPLANE_GAMMA_ENABLE;
1209 1708
1210 if (pipe == 0) 1709 /* IGDNG's plane is forced to pipe, bit 24 is to
1211 dspcntr |= DISPPLANE_SEL_PIPE_A; 1710 enable color space conversion */
1212 else 1711 if (!IS_IGDNG(dev)) {
1213 dspcntr |= DISPPLANE_SEL_PIPE_B; 1712 if (pipe == 0)
1713 dspcntr |= DISPPLANE_SEL_PIPE_A;
1714 else
1715 dspcntr |= DISPPLANE_SEL_PIPE_B;
1716 }
1214 1717
1215 if (pipe == 0 && !IS_I965G(dev)) { 1718 if (pipe == 0 && !IS_I965G(dev)) {
1216 /* Enable pixel doubling when the dot clock is > 90% of the (display) 1719 /* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -1231,12 +1734,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1231 1734
1232 1735
1233 /* Disable the panel fitter if it was on our pipe */ 1736 /* Disable the panel fitter if it was on our pipe */
1234 if (intel_panel_fitter_pipe(dev) == pipe) 1737 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
1235 I915_WRITE(PFIT_CONTROL, 0); 1738 I915_WRITE(PFIT_CONTROL, 0);
1236 1739
1237 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 1740 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
1238 drm_mode_debug_printmodeline(mode); 1741 drm_mode_debug_printmodeline(mode);
1239 1742
1743 /* assign to IGDNG registers */
1744 if (IS_IGDNG(dev)) {
1745 fp_reg = pch_fp_reg;
1746 dpll_reg = pch_dpll_reg;
1747 }
1240 1748
1241 if (dpll & DPLL_VCO_ENABLE) { 1749 if (dpll & DPLL_VCO_ENABLE) {
1242 I915_WRITE(fp_reg, fp); 1750 I915_WRITE(fp_reg, fp);
@@ -1245,13 +1753,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1245 udelay(150); 1753 udelay(150);
1246 } 1754 }
1247 1755
1756 if (IS_IGDNG(dev)) {
1757 /* enable PCH clock reference source */
1758 /* XXX need to change the setting for other outputs */
1759 u32 temp;
1760 temp = I915_READ(PCH_DREF_CONTROL);
1761 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1762 temp |= DREF_NONSPREAD_CK505_ENABLE;
1763 temp &= ~DREF_SSC_SOURCE_MASK;
1764 temp |= DREF_SSC_SOURCE_ENABLE;
1765 temp &= ~DREF_SSC1_ENABLE;
1766 /* if no eDP, disable source output to CPU */
1767 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1768 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1769 I915_WRITE(PCH_DREF_CONTROL, temp);
1770 }
1771
1248 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 1772 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
1249 * This is an exception to the general rule that mode_set doesn't turn 1773 * This is an exception to the general rule that mode_set doesn't turn
1250 * things on. 1774 * things on.
1251 */ 1775 */
1252 if (is_lvds) { 1776 if (is_lvds) {
1253 u32 lvds = I915_READ(LVDS); 1777 u32 lvds;
1254 1778
1779 if (IS_IGDNG(dev))
1780 lvds_reg = PCH_LVDS;
1781
1782 lvds = I915_READ(lvds_reg);
1255 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 1783 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
1256 /* Set the B0-B3 data pairs corresponding to whether we're going to 1784 /* Set the B0-B3 data pairs corresponding to whether we're going to
1257 * set the DPLLs for dual-channel mode or not. 1785 * set the DPLLs for dual-channel mode or not.
@@ -1266,8 +1794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1266 * panels behave in the two modes. 1794 * panels behave in the two modes.
1267 */ 1795 */
1268 1796
1269 I915_WRITE(LVDS, lvds); 1797 I915_WRITE(lvds_reg, lvds);
1270 I915_READ(LVDS); 1798 I915_READ(lvds_reg);
1271 } 1799 }
1272 1800
1273 I915_WRITE(fp_reg, fp); 1801 I915_WRITE(fp_reg, fp);
@@ -1276,8 +1804,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1276 /* Wait for the clocks to stabilize. */ 1804 /* Wait for the clocks to stabilize. */
1277 udelay(150); 1805 udelay(150);
1278 1806
1279 if (IS_I965G(dev)) { 1807 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
1280 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1808 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1281 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 1809 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
1282 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 1810 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
1283 } else { 1811 } else {
@@ -1303,9 +1831,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1303 /* pipesrc and dspsize control the size that is scaled from, which should 1831 /* pipesrc and dspsize control the size that is scaled from, which should
1304 * always be the user's requested size. 1832 * always be the user's requested size.
1305 */ 1833 */
1306 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 1834 if (!IS_IGDNG(dev)) {
1307 I915_WRITE(dsppos_reg, 0); 1835 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
1836 (mode->hdisplay - 1));
1837 I915_WRITE(dsppos_reg, 0);
1838 }
1308 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 1839 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
1840
1841 if (IS_IGDNG(dev)) {
1842 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
1843 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
1844 I915_WRITE(link_m1_reg, m_n.link_m);
1845 I915_WRITE(link_n1_reg, m_n.link_n);
1846
1847 /* enable FDI RX PLL too */
1848 temp = I915_READ(fdi_rx_reg);
1849 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1850 udelay(200);
1851 }
1852
1309 I915_WRITE(pipeconf_reg, pipeconf); 1853 I915_WRITE(pipeconf_reg, pipeconf);
1310 I915_READ(pipeconf_reg); 1854 I915_READ(pipeconf_reg);
1311 1855
@@ -1315,12 +1859,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1315 1859
1316 /* Flush the plane changes */ 1860 /* Flush the plane changes */
1317 ret = intel_pipe_set_base(crtc, x, y, old_fb); 1861 ret = intel_pipe_set_base(crtc, x, y, old_fb);
1318 if (ret != 0)
1319 return ret;
1320
1321 drm_vblank_post_modeset(dev, pipe); 1862 drm_vblank_post_modeset(dev, pipe);
1322 1863
1323 return 0; 1864 return ret;
1324} 1865}
1325 1866
1326/** Loads the palette/gamma unit for the CRTC with the prepared values */ 1867/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1336,6 +1877,11 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
1336 if (!crtc->enabled) 1877 if (!crtc->enabled)
1337 return; 1878 return;
1338 1879
1880 /* use legacy palette for IGDNG */
1881 if (IS_IGDNG(dev))
1882 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
1883 LGC_PALETTE_B;
1884
1339 for (i = 0; i < 256; i++) { 1885 for (i = 0; i < 256; i++) {
1340 I915_WRITE(palreg + 4 * i, 1886 I915_WRITE(palreg + 4 * i,
1341 (intel_crtc->lut_r[i] << 16) | 1887 (intel_crtc->lut_r[i] << 16) |
@@ -1464,16 +2010,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1464 uint32_t adder; 2010 uint32_t adder;
1465 2011
1466 if (x < 0) { 2012 if (x < 0) {
1467 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); 2013 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
1468 x = -x; 2014 x = -x;
1469 } 2015 }
1470 if (y < 0) { 2016 if (y < 0) {
1471 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); 2017 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
1472 y = -y; 2018 y = -y;
1473 } 2019 }
1474 2020
1475 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); 2021 temp |= x << CURSOR_X_SHIFT;
1476 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); 2022 temp |= y << CURSOR_Y_SHIFT;
1477 2023
1478 adder = intel_crtc->cursor_addr; 2024 adder = intel_crtc->cursor_addr;
1479 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); 2025 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
@@ -1590,6 +2136,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
1590 } 2136 }
1591 2137
1592 encoder->crtc = crtc; 2138 encoder->crtc = crtc;
2139 intel_output->base.encoder = encoder;
1593 intel_output->load_detect_temp = true; 2140 intel_output->load_detect_temp = true;
1594 2141
1595 intel_crtc = to_intel_crtc(crtc); 2142 intel_crtc = to_intel_crtc(crtc);
@@ -1625,6 +2172,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
1625 2172
1626 if (intel_output->load_detect_temp) { 2173 if (intel_output->load_detect_temp) {
1627 encoder->crtc = NULL; 2174 encoder->crtc = NULL;
2175 intel_output->base.encoder = NULL;
1628 intel_output->load_detect_temp = false; 2176 intel_output->load_detect_temp = false;
1629 crtc->enabled = drm_helper_crtc_in_use(crtc); 2177 crtc->enabled = drm_helper_crtc_in_use(crtc);
1630 drm_helper_disable_unused_functions(dev); 2178 drm_helper_disable_unused_functions(dev);
@@ -1762,6 +2310,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
1762{ 2310{
1763 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1764 2312
2313 if (intel_crtc->mode_set.mode)
2314 drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
1765 drm_crtc_cleanup(crtc); 2315 drm_crtc_cleanup(crtc);
1766 kfree(intel_crtc); 2316 kfree(intel_crtc);
1767} 2317}
@@ -1888,7 +2438,24 @@ static void intel_setup_outputs(struct drm_device *dev)
1888 if (IS_MOBILE(dev) && !IS_I830(dev)) 2438 if (IS_MOBILE(dev) && !IS_I830(dev))
1889 intel_lvds_init(dev); 2439 intel_lvds_init(dev);
1890 2440
1891 if (IS_I9XX(dev)) { 2441 if (IS_IGDNG(dev)) {
2442 int found;
2443
2444 if (I915_READ(HDMIB) & PORT_DETECTED) {
2445 /* check SDVOB */
2446 /* found = intel_sdvo_init(dev, HDMIB); */
2447 found = 0;
2448 if (!found)
2449 intel_hdmi_init(dev, HDMIB);
2450 }
2451
2452 if (I915_READ(HDMIC) & PORT_DETECTED)
2453 intel_hdmi_init(dev, HDMIC);
2454
2455 if (I915_READ(HDMID) & PORT_DETECTED)
2456 intel_hdmi_init(dev, HDMID);
2457
2458 } else if (IS_I9XX(dev)) {
1892 int found; 2459 int found;
1893 u32 reg; 2460 u32 reg;
1894 2461
@@ -1912,7 +2479,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1912 } else 2479 } else
1913 intel_dvo_init(dev); 2480 intel_dvo_init(dev);
1914 2481
1915 if (IS_I9XX(dev) && IS_MOBILE(dev)) 2482 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
1916 intel_tv_init(dev); 2483 intel_tv_init(dev);
1917 2484
1918 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2485 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e4652dcdd9bb..0ecf6b76a401 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -207,7 +207,7 @@ static int intelfb_set_par(struct fb_info *info)
207 207
208 if (var->pixclock != -1) { 208 if (var->pixclock != -1) {
209 209
210 DRM_ERROR("PIXEL CLCOK SET\n"); 210 DRM_ERROR("PIXEL CLOCK SET\n");
211 return -EINVAL; 211 return -EINVAL;
212 } else { 212 } else {
213 struct drm_crtc *crtc; 213 struct drm_crtc *crtc;
@@ -674,8 +674,12 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
674 par->crtc_ids[0] = crtc->base.id; 674 par->crtc_ids[0] = crtc->base.id;
675 675
676 modeset->num_connectors = conn_count; 676 modeset->num_connectors = conn_count;
677 if (modeset->mode != modeset->crtc->desired_mode) 677 if (modeset->crtc->desired_mode) {
678 modeset->mode = modeset->crtc->desired_mode; 678 if (modeset->mode)
679 drm_mode_destroy(dev, modeset->mode);
680 modeset->mode = drm_mode_duplicate(dev,
681 modeset->crtc->desired_mode);
682 }
679 683
680 par->crtc_count = 1; 684 par->crtc_count = 1;
681 685
@@ -824,8 +828,12 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
824 par->crtc_ids[crtc_count++] = crtc->base.id; 828 par->crtc_ids[crtc_count++] = crtc->base.id;
825 829
826 modeset->num_connectors = conn_count; 830 modeset->num_connectors = conn_count;
827 if (modeset->mode != modeset->crtc->desired_mode) 831 if (modeset->crtc->desired_mode) {
828 modeset->mode = modeset->crtc->desired_mode; 832 if (modeset->mode)
833 drm_mode_destroy(dev, modeset->mode);
834 modeset->mode = drm_mode_duplicate(dev,
835 modeset->crtc->desired_mode);
836 }
829 } 837 }
830 par->crtc_count = crtc_count; 838 par->crtc_count = crtc_count;
831 839
@@ -857,9 +865,15 @@ void intelfb_restore(void)
857 drm_crtc_helper_set_config(&kernelfb_mode); 865 drm_crtc_helper_set_config(&kernelfb_mode);
858} 866}
859 867
868static void intelfb_restore_work_fn(struct work_struct *ignored)
869{
870 intelfb_restore();
871}
872static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
873
860static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) 874static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
861{ 875{
862 intelfb_restore(); 876 schedule_work(&intelfb_restore_work);
863} 877}
864 878
865static struct sysrq_key_op sysrq_intelfb_restore_op = { 879static struct sysrq_key_op sysrq_intelfb_restore_op = {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 7d6bdd705326..4ea2a651b92c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -56,7 +56,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
56 sdvox = SDVO_ENCODING_HDMI | 56 sdvox = SDVO_ENCODING_HDMI |
57 SDVO_BORDER_ENABLE | 57 SDVO_BORDER_ENABLE |
58 SDVO_VSYNC_ACTIVE_HIGH | 58 SDVO_VSYNC_ACTIVE_HIGH |
59 SDVO_HSYNC_ACTIVE_HIGH; 59 SDVO_HSYNC_ACTIVE_HIGH |
60 SDVO_NULL_PACKETS_DURING_VSYNC;
60 61
61 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
62 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
@@ -145,6 +146,22 @@ intel_hdmi_sink_detect(struct drm_connector *connector)
145} 146}
146 147
147static enum drm_connector_status 148static enum drm_connector_status
149igdng_hdmi_detect(struct drm_connector *connector)
150{
151 struct intel_output *intel_output = to_intel_output(connector);
152 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
153
154 /* FIXME hotplug detect */
155
156 hdmi_priv->has_hdmi_sink = false;
157 intel_hdmi_sink_detect(connector);
158 if (hdmi_priv->has_hdmi_sink)
159 return connector_status_connected;
160 else
161 return connector_status_disconnected;
162}
163
164static enum drm_connector_status
148intel_hdmi_detect(struct drm_connector *connector) 165intel_hdmi_detect(struct drm_connector *connector)
149{ 166{
150 struct drm_device *dev = connector->dev; 167 struct drm_device *dev = connector->dev;
@@ -153,6 +170,9 @@ intel_hdmi_detect(struct drm_connector *connector)
153 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 170 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
154 u32 temp, bit; 171 u32 temp, bit;
155 172
173 if (IS_IGDNG(dev))
174 return igdng_hdmi_detect(connector);
175
156 temp = I915_READ(PORT_HOTPLUG_EN); 176 temp = I915_READ(PORT_HOTPLUG_EN);
157 177
158 switch (hdmi_priv->sdvox_reg) { 178 switch (hdmi_priv->sdvox_reg) {
@@ -269,8 +289,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
269 /* Set up the DDC bus. */ 289 /* Set up the DDC bus. */
270 if (sdvox_reg == SDVOB) 290 if (sdvox_reg == SDVOB)
271 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 291 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
272 else 292 else if (sdvox_reg == SDVOC)
273 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 293 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
294 else if (sdvox_reg == HDMIB)
295 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
296 "HDMIB");
297 else if (sdvox_reg == HDMIC)
298 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
299 "HDMIC");
300 else if (sdvox_reg == HDMID)
301 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
302 "HDMID");
274 303
275 if (!intel_output->ddc_bus) 304 if (!intel_output->ddc_bus)
276 goto err_connector; 305 goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 53cccfa58b95..f073ed8432e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -37,6 +37,8 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40#define I915_LVDS "i915_lvds"
41
40/** 42/**
41 * Sets the backlight level. 43 * Sets the backlight level.
42 * 44 *
@@ -45,10 +47,15 @@
45static void intel_lvds_set_backlight(struct drm_device *dev, int level) 47static void intel_lvds_set_backlight(struct drm_device *dev, int level)
46{ 48{
47 struct drm_i915_private *dev_priv = dev->dev_private; 49 struct drm_i915_private *dev_priv = dev->dev_private;
48 u32 blc_pwm_ctl; 50 u32 blc_pwm_ctl, reg;
51
52 if (IS_IGDNG(dev))
53 reg = BLC_PWM_CPU_CTL;
54 else
55 reg = BLC_PWM_CTL;
49 56
50 blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; 57 blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
51 I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | 58 I915_WRITE(reg, (blc_pwm_ctl |
52 (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); 59 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
53} 60}
54 61
@@ -58,8 +65,14 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
58static u32 intel_lvds_get_max_backlight(struct drm_device *dev) 65static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
59{ 66{
60 struct drm_i915_private *dev_priv = dev->dev_private; 67 struct drm_i915_private *dev_priv = dev->dev_private;
68 u32 reg;
69
70 if (IS_IGDNG(dev))
71 reg = BLC_PWM_PCH_CTL2;
72 else
73 reg = BLC_PWM_CTL;
61 74
62 return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> 75 return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
63 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 76 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
64} 77}
65 78
@@ -69,23 +82,31 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
69static void intel_lvds_set_power(struct drm_device *dev, bool on) 82static void intel_lvds_set_power(struct drm_device *dev, bool on)
70{ 83{
71 struct drm_i915_private *dev_priv = dev->dev_private; 84 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 pp_status; 85 u32 pp_status, ctl_reg, status_reg;
86
87 if (IS_IGDNG(dev)) {
88 ctl_reg = PCH_PP_CONTROL;
89 status_reg = PCH_PP_STATUS;
90 } else {
91 ctl_reg = PP_CONTROL;
92 status_reg = PP_STATUS;
93 }
73 94
74 if (on) { 95 if (on) {
75 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 96 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
76 POWER_TARGET_ON); 97 POWER_TARGET_ON);
77 do { 98 do {
78 pp_status = I915_READ(PP_STATUS); 99 pp_status = I915_READ(status_reg);
79 } while ((pp_status & PP_ON) == 0); 100 } while ((pp_status & PP_ON) == 0);
80 101
81 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); 102 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
82 } else { 103 } else {
83 intel_lvds_set_backlight(dev, 0); 104 intel_lvds_set_backlight(dev, 0);
84 105
85 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
86 ~POWER_TARGET_ON); 107 ~POWER_TARGET_ON);
87 do { 108 do {
88 pp_status = I915_READ(PP_STATUS); 109 pp_status = I915_READ(status_reg);
89 } while (pp_status & PP_ON); 110 } while (pp_status & PP_ON);
90 } 111 }
91} 112}
@@ -106,12 +127,28 @@ static void intel_lvds_save(struct drm_connector *connector)
106{ 127{
107 struct drm_device *dev = connector->dev; 128 struct drm_device *dev = connector->dev;
108 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
131 u32 pwm_ctl_reg;
132
133 if (IS_IGDNG(dev)) {
134 pp_on_reg = PCH_PP_ON_DELAYS;
135 pp_off_reg = PCH_PP_OFF_DELAYS;
136 pp_ctl_reg = PCH_PP_CONTROL;
137 pp_div_reg = PCH_PP_DIVISOR;
138 pwm_ctl_reg = BLC_PWM_CPU_CTL;
139 } else {
140 pp_on_reg = PP_ON_DELAYS;
141 pp_off_reg = PP_OFF_DELAYS;
142 pp_ctl_reg = PP_CONTROL;
143 pp_div_reg = PP_DIVISOR;
144 pwm_ctl_reg = BLC_PWM_CTL;
145 }
109 146
110 dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); 147 dev_priv->savePP_ON = I915_READ(pp_on_reg);
111 dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); 148 dev_priv->savePP_OFF = I915_READ(pp_off_reg);
112 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 149 dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
113 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 150 dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
114 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 151 dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
115 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 152 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
116 BACKLIGHT_DUTY_CYCLE_MASK); 153 BACKLIGHT_DUTY_CYCLE_MASK);
117 154
@@ -127,12 +164,28 @@ static void intel_lvds_restore(struct drm_connector *connector)
127{ 164{
128 struct drm_device *dev = connector->dev; 165 struct drm_device *dev = connector->dev;
129 struct drm_i915_private *dev_priv = dev->dev_private; 166 struct drm_i915_private *dev_priv = dev->dev_private;
167 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
168 u32 pwm_ctl_reg;
169
170 if (IS_IGDNG(dev)) {
171 pp_on_reg = PCH_PP_ON_DELAYS;
172 pp_off_reg = PCH_PP_OFF_DELAYS;
173 pp_ctl_reg = PCH_PP_CONTROL;
174 pp_div_reg = PCH_PP_DIVISOR;
175 pwm_ctl_reg = BLC_PWM_CPU_CTL;
176 } else {
177 pp_on_reg = PP_ON_DELAYS;
178 pp_off_reg = PP_OFF_DELAYS;
179 pp_ctl_reg = PP_CONTROL;
180 pp_div_reg = PP_DIVISOR;
181 pwm_ctl_reg = BLC_PWM_CTL;
182 }
130 183
131 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 184 I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
132 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); 185 I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
133 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); 186 I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
134 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 187 I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
135 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 188 I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
136 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) 189 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
137 intel_lvds_set_power(dev, true); 190 intel_lvds_set_power(dev, true);
138 else 191 else
@@ -216,8 +269,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
216{ 269{
217 struct drm_device *dev = encoder->dev; 270 struct drm_device *dev = encoder->dev;
218 struct drm_i915_private *dev_priv = dev->dev_private; 271 struct drm_i915_private *dev_priv = dev->dev_private;
272 u32 reg;
273
274 if (IS_IGDNG(dev))
275 reg = BLC_PWM_CPU_CTL;
276 else
277 reg = BLC_PWM_CTL;
219 278
220 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 279 dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
221 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 280 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
222 BACKLIGHT_DUTY_CYCLE_MASK); 281 BACKLIGHT_DUTY_CYCLE_MASK);
223 282
@@ -251,6 +310,10 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
251 * settings. 310 * settings.
252 */ 311 */
253 312
313 /* No panel fitting yet, fixme */
314 if (IS_IGDNG(dev))
315 return;
316
254 /* 317 /*
255 * Enable automatic panel scaling so that non-native modes fill the 318 * Enable automatic panel scaling so that non-native modes fill the
256 * screen. Should be enabled before the pipe is enabled, according to 319 * screen. Should be enabled before the pipe is enabled, according to
@@ -382,7 +445,8 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
382 445
383static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 446static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
384{ 447{
385 DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); 448 DRM_DEBUG_KMS(I915_LVDS,
449 "Skipping LVDS initialization for %s\n", id->ident);
386 return 1; 450 return 1;
387} 451}
388 452
@@ -420,8 +484,21 @@ static const struct dmi_system_id intel_no_lvds[] = {
420 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), 484 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
421 }, 485 },
422 }, 486 },
423 487 {
424 /* FIXME: add a check for the Aopen Mini PC */ 488 .callback = intel_no_lvds_dmi_callback,
489 .ident = "AOpen Mini PC",
490 .matches = {
491 DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
492 DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
493 },
494 },
495 {
496 .callback = intel_no_lvds_dmi_callback,
497 .ident = "Aopen i945GTt-VFA",
498 .matches = {
499 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
500 },
501 },
425 502
426 { } /* terminating entry */ 503 { } /* terminating entry */
427}; 504};
@@ -442,12 +519,18 @@ void intel_lvds_init(struct drm_device *dev)
442 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 519 struct drm_display_mode *scan; /* *modes, *bios_mode; */
443 struct drm_crtc *crtc; 520 struct drm_crtc *crtc;
444 u32 lvds; 521 u32 lvds;
445 int pipe; 522 int pipe, gpio = GPIOC;
446 523
447 /* Skip init on machines we know falsely report LVDS */ 524 /* Skip init on machines we know falsely report LVDS */
448 if (dmi_check_system(intel_no_lvds)) 525 if (dmi_check_system(intel_no_lvds))
449 return; 526 return;
450 527
528 if (IS_IGDNG(dev)) {
529 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
530 return;
531 gpio = PCH_GPIOC;
532 }
533
451 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 534 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
452 if (!intel_output) { 535 if (!intel_output) {
453 return; 536 return;
@@ -482,7 +565,7 @@ void intel_lvds_init(struct drm_device *dev)
482 */ 565 */
483 566
484 /* Set up the DDC bus. */ 567 /* Set up the DDC bus. */
485 intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); 568 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
486 if (!intel_output->ddc_bus) { 569 if (!intel_output->ddc_bus) {
487 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 570 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
488 "failed.\n"); 571 "failed.\n");
@@ -524,6 +607,11 @@ void intel_lvds_init(struct drm_device *dev)
524 * on. If so, assume that whatever is currently programmed is the 607 * on. If so, assume that whatever is currently programmed is the
525 * correct mode. 608 * correct mode.
526 */ 609 */
610
611 /* IGDNG: FIXME if still fail, not try pipe mode now */
612 if (IS_IGDNG(dev))
613 goto failed;
614
527 lvds = I915_READ(LVDS); 615 lvds = I915_READ(LVDS);
528 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 616 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
529 crtc = intel_get_crtc_from_pipe(dev, pipe); 617 crtc = intel_get_crtc_from_pipe(dev, pipe);
@@ -542,11 +630,22 @@ void intel_lvds_init(struct drm_device *dev)
542 goto failed; 630 goto failed;
543 631
544out: 632out:
633 if (IS_IGDNG(dev)) {
634 u32 pwm;
635 /* make sure PWM is enabled */
636 pwm = I915_READ(BLC_PWM_CPU_CTL2);
637 pwm |= (PWM_ENABLE | PWM_PIPE_B);
638 I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
639
640 pwm = I915_READ(BLC_PWM_PCH_CTL1);
641 pwm |= PWM_PCH_ENABLE;
642 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
643 }
545 drm_sysfs_connector_add(connector); 644 drm_sysfs_connector_add(connector);
546 return; 645 return;
547 646
548failed: 647failed:
549 DRM_DEBUG("No LVDS modes found, disabling.\n"); 648 DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
550 if (intel_output->ddc_bus) 649 if (intel_output->ddc_bus)
551 intel_i2c_destroy(intel_output->ddc_bus); 650 intel_i2c_destroy(intel_output->ddc_bus);
552 drm_connector_cleanup(connector); 651 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3093b4d4a4dd..9a00adb3a508 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,7 @@
36#include "intel_sdvo_regs.h" 36#include "intel_sdvo_regs.h"
37 37
38#undef SDVO_DEBUG 38#undef SDVO_DEBUG
39 39#define I915_SDVO "i915_sdvo"
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr; 42 int slaveaddr;
@@ -277,20 +277,21 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
277 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 277 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
278 int i; 278 int i;
279 279
280 printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); 280 DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
281 SDVO_NAME(sdvo_priv), cmd);
281 for (i = 0; i < args_len; i++) 282 for (i = 0; i < args_len; i++)
282 printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]); 283 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
283 for (; i < 8; i++) 284 for (; i < 8; i++)
284 printk(KERN_DEBUG " "); 285 DRM_LOG_KMS(" ");
285 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { 286 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
286 if (cmd == sdvo_cmd_names[i].cmd) { 287 if (cmd == sdvo_cmd_names[i].cmd) {
287 printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name); 288 DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
288 break; 289 break;
289 } 290 }
290 } 291 }
291 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) 292 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
292 printk(KERN_DEBUG "(%02X)", cmd); 293 DRM_LOG_KMS("(%02X)", cmd);
293 printk(KERN_DEBUG "\n"); 294 DRM_LOG_KMS("\n");
294} 295}
295#else 296#else
296#define intel_sdvo_debug_write(o, c, a, l) 297#define intel_sdvo_debug_write(o, c, a, l)
@@ -329,16 +330,16 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
329 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 330 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
330 int i; 331 int i;
331 332
332 printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv)); 333 DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
333 for (i = 0; i < response_len; i++) 334 for (i = 0; i < response_len; i++)
334 printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]); 335 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
335 for (; i < 8; i++) 336 for (; i < 8; i++)
336 printk(KERN_DEBUG " "); 337 DRM_LOG_KMS(" ");
337 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 338 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
338 printk(KERN_DEBUG "(%s)", cmd_status_names[status]); 339 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
339 else 340 else
340 printk(KERN_DEBUG "(??? %d)", status); 341 DRM_LOG_KMS("(??? %d)", status);
341 printk(KERN_DEBUG "\n"); 342 DRM_LOG_KMS("\n");
342} 343}
343#else 344#else
344#define intel_sdvo_debug_response(o, r, l, s) 345#define intel_sdvo_debug_response(o, r, l, s)
@@ -1742,6 +1743,43 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
1742 .master_xfer = intel_sdvo_master_xfer, 1743 .master_xfer = intel_sdvo_master_xfer,
1743}; 1744};
1744 1745
1746static u8
1747intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
1748{
1749 struct drm_i915_private *dev_priv = dev->dev_private;
1750 struct sdvo_device_mapping *my_mapping, *other_mapping;
1751
1752 if (output_device == SDVOB) {
1753 my_mapping = &dev_priv->sdvo_mappings[0];
1754 other_mapping = &dev_priv->sdvo_mappings[1];
1755 } else {
1756 my_mapping = &dev_priv->sdvo_mappings[1];
1757 other_mapping = &dev_priv->sdvo_mappings[0];
1758 }
1759
1760 /* If the BIOS described our SDVO device, take advantage of it. */
1761 if (my_mapping->slave_addr)
1762 return my_mapping->slave_addr;
1763
1764 /* If the BIOS only described a different SDVO device, use the
1765 * address that it isn't using.
1766 */
1767 if (other_mapping->slave_addr) {
1768 if (other_mapping->slave_addr == 0x70)
1769 return 0x72;
1770 else
1771 return 0x70;
1772 }
1773
1774 /* No SDVO device info is found for another DVO port,
1775 * so use mapping assumption we had before BIOS parsing.
1776 */
1777 if (output_device == SDVOB)
1778 return 0x70;
1779 else
1780 return 0x72;
1781}
1782
1745bool intel_sdvo_init(struct drm_device *dev, int output_device) 1783bool intel_sdvo_init(struct drm_device *dev, int output_device)
1746{ 1784{
1747 struct drm_connector *connector; 1785 struct drm_connector *connector;
@@ -1753,6 +1791,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1753 u8 ch[0x40]; 1791 u8 ch[0x40];
1754 int i; 1792 int i;
1755 int encoder_type, output_id; 1793 int encoder_type, output_id;
1794 u8 slave_addr;
1756 1795
1757 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 1796 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
1758 if (!intel_output) { 1797 if (!intel_output) {
@@ -1771,16 +1810,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1771 if (!i2cbus) 1810 if (!i2cbus)
1772 goto err_inteloutput; 1811 goto err_inteloutput;
1773 1812
1813 slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
1774 sdvo_priv->i2c_bus = i2cbus; 1814 sdvo_priv->i2c_bus = i2cbus;
1775 1815
1776 if (output_device == SDVOB) { 1816 if (output_device == SDVOB) {
1777 output_id = 1; 1817 output_id = 1;
1778 sdvo_priv->i2c_bus->slave_addr = 0x38;
1779 } else { 1818 } else {
1780 output_id = 2; 1819 output_id = 2;
1781 sdvo_priv->i2c_bus->slave_addr = 0x39;
1782 } 1820 }
1783 1821 sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
1784 sdvo_priv->output_device = output_device; 1822 sdvo_priv->output_device = output_device;
1785 intel_output->i2c_bus = i2cbus; 1823 intel_output->i2c_bus = i2cbus;
1786 intel_output->dev_priv = sdvo_priv; 1824 intel_output->dev_priv = sdvo_priv;
@@ -1788,8 +1826,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1788 /* Read the regs to test if we can talk to the device */ 1826 /* Read the regs to test if we can talk to the device */
1789 for (i = 0; i < 0x40; i++) { 1827 for (i = 0; i < 0x40; i++) {
1790 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 1828 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
1791 DRM_DEBUG("No SDVO device found on SDVO%c\n", 1829 DRM_DEBUG_KMS(I915_SDVO,
1792 output_device == SDVOB ? 'B' : 'C'); 1830 "No SDVO device found on SDVO%c\n",
1831 output_device == SDVOB ? 'B' : 'C');
1793 goto err_i2c; 1832 goto err_i2c;
1794 } 1833 }
1795 } 1834 }
@@ -1873,9 +1912,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1873 1912
1874 sdvo_priv->controlled_output = 0; 1913 sdvo_priv->controlled_output = 0;
1875 memcpy (bytes, &sdvo_priv->caps.output_flags, 2); 1914 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1876 DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", 1915 DRM_DEBUG_KMS(I915_SDVO,
1877 SDVO_NAME(sdvo_priv), 1916 "%s: Unknown SDVO output type (0x%02x%02x)\n",
1878 bytes[0], bytes[1]); 1917 SDVO_NAME(sdvo_priv),
1918 bytes[0], bytes[1]);
1879 encoder_type = DRM_MODE_ENCODER_NONE; 1919 encoder_type = DRM_MODE_ENCODER_NONE;
1880 connector_type = DRM_MODE_CONNECTOR_Unknown; 1920 connector_type = DRM_MODE_CONNECTOR_Unknown;
1881 goto err_i2c; 1921 goto err_i2c;
@@ -1905,21 +1945,21 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1905 &sdvo_priv->pixel_clock_max); 1945 &sdvo_priv->pixel_clock_max);
1906 1946
1907 1947
1908 DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " 1948 DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
1909 "clock range %dMHz - %dMHz, " 1949 "clock range %dMHz - %dMHz, "
1910 "input 1: %c, input 2: %c, " 1950 "input 1: %c, input 2: %c, "
1911 "output 1: %c, output 2: %c\n", 1951 "output 1: %c, output 2: %c\n",
1912 SDVO_NAME(sdvo_priv), 1952 SDVO_NAME(sdvo_priv),
1913 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, 1953 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
1914 sdvo_priv->caps.device_rev_id, 1954 sdvo_priv->caps.device_rev_id,
1915 sdvo_priv->pixel_clock_min / 1000, 1955 sdvo_priv->pixel_clock_min / 1000,
1916 sdvo_priv->pixel_clock_max / 1000, 1956 sdvo_priv->pixel_clock_max / 1000,
1917 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', 1957 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
1918 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', 1958 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
1919 /* check currently supported outputs */ 1959 /* check currently supported outputs */
1920 sdvo_priv->caps.output_flags & 1960 sdvo_priv->caps.output_flags &
1921 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', 1961 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
1922 sdvo_priv->caps.output_flags & 1962 sdvo_priv->caps.output_flags &
1923 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 1963 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1924 1964
1925 return true; 1965 return true;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 98ac0546b7bd..50d7ed70b338 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1392,6 +1392,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1392 tv_ctl &= ~TV_TEST_MODE_MASK; 1392 tv_ctl &= ~TV_TEST_MODE_MASK;
1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1394 tv_dac &= ~TVDAC_SENSE_MASK; 1394 tv_dac &= ~TVDAC_SENSE_MASK;
1395 tv_dac &= ~DAC_A_MASK;
1396 tv_dac &= ~DAC_B_MASK;
1397 tv_dac &= ~DAC_C_MASK;
1395 tv_dac |= (TVDAC_STATE_CHG_EN | 1398 tv_dac |= (TVDAC_STATE_CHG_EN |
1396 TVDAC_A_SENSE_CTL | 1399 TVDAC_A_SENSE_CTL |
1397 TVDAC_B_SENSE_CTL | 1400 TVDAC_B_SENSE_CTL |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index bc9d09dfa8e7..146f3570af8e 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -478,26 +478,27 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
478 478
479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) { 479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) {
480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
481 DRM_INFO("Loading RV770 PFP Microcode\n"); 481 DRM_INFO("Loading RV770/RV790 PFP Microcode\n");
482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]); 483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]);
484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
485 485
486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
487 DRM_INFO("Loading RV770 CP Microcode\n"); 487 DRM_INFO("Loading RV770/RV790 CP Microcode\n");
488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]); 489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]);
490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
491 491
492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730)) { 492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730) ||
493 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)) {
493 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 494 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
494 DRM_INFO("Loading RV730 PFP Microcode\n"); 495 DRM_INFO("Loading RV730/RV740 PFP Microcode\n");
495 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 496 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
496 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]); 497 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]);
497 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 498 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
498 499
499 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 500 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
500 DRM_INFO("Loading RV730 CP Microcode\n"); 501 DRM_INFO("Loading RV730/RV740 CP Microcode\n");
501 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 502 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
502 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]); 503 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]);
503 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 504 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
@@ -1324,6 +1325,10 @@ static void r700_gfx_init(struct drm_device *dev,
1324 dev_priv->r700_sc_prim_fifo_size = 0xf9; 1325 dev_priv->r700_sc_prim_fifo_size = 0xf9;
1325 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1326 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1326 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1327 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1328 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1329 dev_priv->r600_sx_max_export_pos_size -= 16;
1330 dev_priv->r600_sx_max_export_smx_size += 16;
1331 }
1327 break; 1332 break;
1328 case CHIP_RV710: 1333 case CHIP_RV710:
1329 dev_priv->r600_max_pipes = 2; 1334 dev_priv->r600_max_pipes = 2;
@@ -1345,6 +1350,31 @@ static void r700_gfx_init(struct drm_device *dev,
1345 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1350 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1346 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1351 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1347 break; 1352 break;
1353 case CHIP_RV740:
1354 dev_priv->r600_max_pipes = 4;
1355 dev_priv->r600_max_tile_pipes = 4;
1356 dev_priv->r600_max_simds = 8;
1357 dev_priv->r600_max_backends = 4;
1358 dev_priv->r600_max_gprs = 256;
1359 dev_priv->r600_max_threads = 248;
1360 dev_priv->r600_max_stack_entries = 512;
1361 dev_priv->r600_max_hw_contexts = 8;
1362 dev_priv->r600_max_gs_threads = 16 * 2;
1363 dev_priv->r600_sx_max_export_size = 256;
1364 dev_priv->r600_sx_max_export_pos_size = 32;
1365 dev_priv->r600_sx_max_export_smx_size = 224;
1366 dev_priv->r600_sq_num_cf_insts = 2;
1367
1368 dev_priv->r700_sx_num_of_sets = 7;
1369 dev_priv->r700_sc_prim_fifo_size = 0x100;
1370 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1371 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1372
1373 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1374 dev_priv->r600_sx_max_export_pos_size -= 16;
1375 dev_priv->r600_sx_max_export_smx_size += 16;
1376 }
1377 break;
1348 default: 1378 default:
1349 break; 1379 break;
1350 } 1380 }
@@ -1493,6 +1523,7 @@ static void r700_gfx_init(struct drm_device *dev,
1493 break; 1523 break;
1494 case CHIP_RV730: 1524 case CHIP_RV730:
1495 case CHIP_RV710: 1525 case CHIP_RV710:
1526 case CHIP_RV740:
1496 default: 1527 default:
1497 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
1498 break; 1529 break;
@@ -1569,6 +1600,7 @@ static void r700_gfx_init(struct drm_device *dev,
1569 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1600 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1570 case CHIP_RV770: 1601 case CHIP_RV770:
1571 case CHIP_RV730: 1602 case CHIP_RV730:
1603 case CHIP_RV740:
1572 gs_prim_buffer_depth = 384; 1604 gs_prim_buffer_depth = 384;
1573 break; 1605 break;
1574 case CHIP_RV710: 1606 case CHIP_RV710:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index aff90bb96488..89c4c44169f7 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2109,7 +2109,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2109 2109
2110 /* prebuild the SAREA */ 2110 /* prebuild the SAREA */
2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); 2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, 2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
2113 &master_priv->sarea); 2113 &master_priv->sarea);
2114 if (ret) { 2114 if (ret) {
2115 DRM_ERROR("SAREA setup failed\n"); 2115 DRM_ERROR("SAREA setup failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 0c6bfc1de153..127d0456f628 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -146,6 +146,7 @@ enum radeon_family {
146 CHIP_RV770, 146 CHIP_RV770,
147 CHIP_RV730, 147 CHIP_RV730,
148 CHIP_RV710, 148 CHIP_RV710,
149 CHIP_RV740,
149 CHIP_LAST, 150 CHIP_LAST,
150}; 151};
151 152
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd07..327380888b4a 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -195,10 +195,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
195 default: 195 default:
196 vsg->state = dr_via_sg_init; 196 vsg->state = dr_via_sg_init;
197 } 197 }
198 if (vsg->bounce_buffer) { 198 vfree(vsg->bounce_buffer);
199 vfree(vsg->bounce_buffer); 199 vsg->bounce_buffer = NULL;
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0; 200 vsg->free_on_sequence = 0;
203} 201}
204 202
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 403d0e4265db..fc0949a8cfde 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -216,6 +216,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = {
216 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | 216 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
217 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, 217 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
218 .pio_mask = ATA_PIO6, 218 .pio_mask = ATA_PIO6,
219 .chipset = ide_generic,
219}; 220};
220 221
221/* 222/*
@@ -246,8 +247,7 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id)
246static int __init at91_ide_probe(struct platform_device *pdev) 247static int __init at91_ide_probe(struct platform_device *pdev)
247{ 248{
248 int ret; 249 int ret;
249 hw_regs_t hw; 250 struct ide_hw hw, *hws[] = { &hw };
250 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
251 struct ide_host *host; 251 struct ide_host *host;
252 struct resource *res; 252 struct resource *res;
253 unsigned long tf_base = 0, ctl_base = 0; 253 unsigned long tf_base = 0, ctl_base = 0;
@@ -304,10 +304,9 @@ static int __init at91_ide_probe(struct platform_device *pdev)
304 ide_std_init_ports(&hw, tf_base, ctl_base + 6); 304 ide_std_init_ports(&hw, tf_base, ctl_base + 6);
305 305
306 hw.irq = board->irq_pin; 306 hw.irq = board->irq_pin;
307 hw.chipset = ide_generic;
308 hw.dev = &pdev->dev; 307 hw.dev = &pdev->dev;
309 308
310 host = ide_host_alloc(&at91_ide_port_info, hws); 309 host = ide_host_alloc(&at91_ide_port_info, hws, 1);
311 if (!host) { 310 if (!host) {
312 perr("failed to allocate ide host\n"); 311 perr("failed to allocate ide host\n");
313 return -ENOMEM; 312 return -ENOMEM;
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 46013644c965..58121bd6c115 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -449,7 +449,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
449} 449}
450#endif 450#endif
451 451
452static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) 452static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif)
453{ 453{
454 int i; 454 int i;
455 unsigned long *ata_regs = hw->io_ports_array; 455 unsigned long *ata_regs = hw->io_ports_array;
@@ -499,6 +499,7 @@ static const struct ide_port_info au1xxx_port_info = {
499#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 499#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
500 .mwdma_mask = ATA_MWDMA2, 500 .mwdma_mask = ATA_MWDMA2,
501#endif 501#endif
502 .chipset = ide_au1xxx,
502}; 503};
503 504
504static int au_ide_probe(struct platform_device *dev) 505static int au_ide_probe(struct platform_device *dev)
@@ -507,7 +508,7 @@ static int au_ide_probe(struct platform_device *dev)
507 struct resource *res; 508 struct resource *res;
508 struct ide_host *host; 509 struct ide_host *host;
509 int ret = 0; 510 int ret = 0;
510 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 511 struct ide_hw hw, *hws[] = { &hw };
511 512
512#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 513#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
513 char *mode = "MWDMA2"; 514 char *mode = "MWDMA2";
@@ -548,9 +549,8 @@ static int au_ide_probe(struct platform_device *dev)
548 auide_setup_ports(&hw, ahwif); 549 auide_setup_ports(&hw, ahwif);
549 hw.irq = ahwif->irq; 550 hw.irq = ahwif->irq;
550 hw.dev = &dev->dev; 551 hw.dev = &dev->dev;
551 hw.chipset = ide_au1xxx;
552 552
553 ret = ide_host_add(&au1xxx_port_info, hws, &host); 553 ret = ide_host_add(&au1xxx_port_info, hws, 1, &host);
554 if (ret) 554 if (ret)
555 goto out; 555 goto out;
556 556
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index d028f8864bc1..e3c6a5913305 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -121,7 +121,7 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
121 return 1; 121 return 1;
122} 122}
123 123
124static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, 124static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
125 unsigned long ctl, unsigned long irq_port, 125 unsigned long ctl, unsigned long irq_port,
126 ide_ack_intr_t *ack_intr) 126 ide_ack_intr_t *ack_intr)
127{ 127{
@@ -139,13 +139,12 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
139 139
140 hw->irq = IRQ_AMIGA_PORTS; 140 hw->irq = IRQ_AMIGA_PORTS;
141 hw->ack_intr = ack_intr; 141 hw->ack_intr = ack_intr;
142
143 hw->chipset = ide_generic;
144} 142}
145 143
146static const struct ide_port_info buddha_port_info = { 144static const struct ide_port_info buddha_port_info = {
147 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 145 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
148 .irq_flags = IRQF_SHARED, 146 .irq_flags = IRQF_SHARED,
147 .chipset = ide_generic,
149}; 148};
150 149
151 /* 150 /*
@@ -161,7 +160,7 @@ static int __init buddha_init(void)
161 160
162 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 161 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
163 unsigned long board; 162 unsigned long board;
164 hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; 163 struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
165 164
166 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { 165 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
167 buddha_num_hwifs = BUDDHA_NUM_HWIFS; 166 buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -225,7 +224,7 @@ fail_base2:
225 hws[i] = &hw[i]; 224 hws[i] = &hw[i];
226 } 225 }
227 226
228 ide_host_add(&buddha_port_info, hws, NULL); 227 ide_host_add(&buddha_port_info, hws, i, NULL);
229 } 228 }
230 229
231 return 0; 230 return 0;
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 8890276fef7f..1683ed5c7329 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -708,7 +708,7 @@ static int __init cmd640x_init(void)
708 int second_port_cmd640 = 0, rc; 708 int second_port_cmd640 = 0, rc;
709 const char *bus_type, *port2; 709 const char *bus_type, *port2;
710 u8 b, cfr; 710 u8 b, cfr;
711 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; 711 struct ide_hw hw[2], *hws[2];
712 712
713 if (cmd640_vlb && probe_for_cmd640_vlb()) { 713 if (cmd640_vlb && probe_for_cmd640_vlb()) {
714 bus_type = "VLB"; 714 bus_type = "VLB";
@@ -762,11 +762,9 @@ static int __init cmd640x_init(void)
762 762
763 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); 763 ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
764 hw[0].irq = 14; 764 hw[0].irq = 14;
765 hw[0].chipset = ide_cmd640;
766 765
767 ide_std_init_ports(&hw[1], 0x170, 0x376); 766 ide_std_init_ports(&hw[1], 0x170, 0x376);
768 hw[1].irq = 15; 767 hw[1].irq = 15;
769 hw[1].chipset = ide_cmd640;
770 768
771 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" 769 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
772 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); 770 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
@@ -824,7 +822,8 @@ static int __init cmd640x_init(void)
824 cmd640_dump_regs(); 822 cmd640_dump_regs();
825#endif 823#endif
826 824
827 return ide_host_add(&cmd640_port_info, hws, NULL); 825 return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1,
826 NULL);
828} 827}
829 828
830module_param_named(probe_vlb, cmd640_vlb, bool, 0); 829module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 87987a7d36c9..bd066bb9d611 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = {
110static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 110static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
111{ 111{
112 const struct ide_port_info *d = &cyrix_chipset; 112 const struct ide_port_info *d = &cyrix_chipset;
113 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; 113 struct ide_hw hw[2], *hws[] = { NULL, NULL };
114 114
115 ide_setup_pci_noise(dev, d); 115 ide_setup_pci_noise(dev, d);
116 116
@@ -136,7 +136,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
136 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); 136 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
137 hw[0].irq = 14; 137 hw[0].irq = 14;
138 138
139 return ide_host_add(d, hws, NULL); 139 return ide_host_add(d, hws, 2, NULL);
140} 140}
141 141
142static const struct pci_device_id cs5520_pci_tbl[] = { 142static const struct pci_device_id cs5520_pci_tbl[] = {
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index f153b95619bb..1e10eba62ceb 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -68,6 +68,7 @@ static const struct ide_port_info delkin_cb_port_info = {
68 IDE_HFLAG_NO_DMA, 68 IDE_HFLAG_NO_DMA,
69 .irq_flags = IRQF_SHARED, 69 .irq_flags = IRQF_SHARED,
70 .init_chipset = delkin_cb_init_chipset, 70 .init_chipset = delkin_cb_init_chipset,
71 .chipset = ide_pci,
71}; 72};
72 73
73static int __devinit 74static int __devinit
@@ -76,7 +77,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
76 struct ide_host *host; 77 struct ide_host *host;
77 unsigned long base; 78 unsigned long base;
78 int rc; 79 int rc;
79 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 80 struct ide_hw hw, *hws[] = { &hw };
80 81
81 rc = pci_enable_device(dev); 82 rc = pci_enable_device(dev);
82 if (rc) { 83 if (rc) {
@@ -97,9 +98,8 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
97 ide_std_init_ports(&hw, base + 0x10, base + 0x1e); 98 ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
98 hw.irq = dev->irq; 99 hw.irq = dev->irq;
99 hw.dev = &dev->dev; 100 hw.dev = &dev->dev;
100 hw.chipset = ide_pci; /* this enables IRQ sharing */
101 101
102 rc = ide_host_add(&delkin_cb_port_info, hws, &host); 102 rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
103 if (rc) 103 if (rc)
104 goto out_disable; 104 goto out_disable;
105 105
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index 0e2df6755ec9..22fa27389c3b 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -111,9 +111,10 @@ static const struct ide_port_info falconide_port_info = {
111 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | 111 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
112 IDE_HFLAG_NO_DMA, 112 IDE_HFLAG_NO_DMA,
113 .irq_flags = IRQF_SHARED, 113 .irq_flags = IRQF_SHARED,
114 .chipset = ide_generic,
114}; 115};
115 116
116static void __init falconide_setup_ports(hw_regs_t *hw) 117static void __init falconide_setup_ports(struct ide_hw *hw)
117{ 118{
118 int i; 119 int i;
119 120
@@ -128,8 +129,6 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
128 129
129 hw->irq = IRQ_MFP_IDE; 130 hw->irq = IRQ_MFP_IDE;
130 hw->ack_intr = NULL; 131 hw->ack_intr = NULL;
131
132 hw->chipset = ide_generic;
133} 132}
134 133
135 /* 134 /*
@@ -139,7 +138,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
139static int __init falconide_init(void) 138static int __init falconide_init(void)
140{ 139{
141 struct ide_host *host; 140 struct ide_host *host;
142 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 141 struct ide_hw hw, *hws[] = { &hw };
143 int rc; 142 int rc;
144 143
145 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) 144 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
@@ -154,7 +153,7 @@ static int __init falconide_init(void)
154 153
155 falconide_setup_ports(&hw); 154 falconide_setup_ports(&hw);
156 155
157 host = ide_host_alloc(&falconide_port_info, hws); 156 host = ide_host_alloc(&falconide_port_info, hws, 1);
158 if (host == NULL) { 157 if (host == NULL) {
159 rc = -ENOMEM; 158 rc = -ENOMEM;
160 goto err; 159 goto err;
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index c7119516c5a7..4451a6a5dfe0 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -88,7 +88,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
88 return 1; 88 return 1;
89} 89}
90 90
91static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, 91static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
92 unsigned long ctl, unsigned long irq_port, 92 unsigned long ctl, unsigned long irq_port,
93 ide_ack_intr_t *ack_intr) 93 ide_ack_intr_t *ack_intr)
94{ 94{
@@ -106,14 +106,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
106 106
107 hw->irq = IRQ_AMIGA_PORTS; 107 hw->irq = IRQ_AMIGA_PORTS;
108 hw->ack_intr = ack_intr; 108 hw->ack_intr = ack_intr;
109
110 hw->chipset = ide_generic;
111} 109}
112 110
113static const struct ide_port_info gayle_port_info = { 111static const struct ide_port_info gayle_port_info = {
114 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | 112 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
115 IDE_HFLAG_NO_DMA, 113 IDE_HFLAG_NO_DMA,
116 .irq_flags = IRQF_SHARED, 114 .irq_flags = IRQF_SHARED,
115 .chipset = ide_generic,
117}; 116};
118 117
119 /* 118 /*
@@ -126,7 +125,7 @@ static int __init gayle_init(void)
126 unsigned long base, ctrlport, irqport; 125 unsigned long base, ctrlport, irqport;
127 ide_ack_intr_t *ack_intr; 126 ide_ack_intr_t *ack_intr;
128 int a4000, i, rc; 127 int a4000, i, rc;
129 hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; 128 struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
130 129
131 if (!MACH_IS_AMIGA) 130 if (!MACH_IS_AMIGA)
132 return -ENODEV; 131 return -ENODEV;
@@ -171,7 +170,7 @@ found:
171 hws[i] = &hw[i]; 170 hws[i] = &hw[i];
172 } 171 }
173 172
174 rc = ide_host_add(&gayle_port_info, hws, NULL); 173 rc = ide_host_add(&gayle_port_info, hws, i, NULL);
175 if (rc) 174 if (rc)
176 release_mem_region(res_start, res_n); 175 release_mem_region(res_start, res_n);
177 176
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 0feb66c720e1..7ce68ef6b904 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -138,14 +138,6 @@
138#undef HPT_RESET_STATE_ENGINE 138#undef HPT_RESET_STATE_ENGINE
139#undef HPT_DELAY_INTERRUPT 139#undef HPT_DELAY_INTERRUPT
140 140
141static const char *quirk_drives[] = {
142 "QUANTUM FIREBALLlct08 08",
143 "QUANTUM FIREBALLP KA6.4",
144 "QUANTUM FIREBALLP LM20.4",
145 "QUANTUM FIREBALLP LM20.5",
146 NULL
147};
148
149static const char *bad_ata100_5[] = { 141static const char *bad_ata100_5[] = {
150 "IBM-DTLA-307075", 142 "IBM-DTLA-307075",
151 "IBM-DTLA-307060", 143 "IBM-DTLA-307060",
@@ -729,27 +721,13 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
729 hpt3xx_set_mode(drive, XFER_PIO_0 + pio); 721 hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
730} 722}
731 723
732static void hpt3xx_quirkproc(ide_drive_t *drive)
733{
734 char *m = (char *)&drive->id[ATA_ID_PROD];
735 const char **list = quirk_drives;
736
737 while (*list)
738 if (strstr(m, *list++)) {
739 drive->quirk_list = 1;
740 return;
741 }
742
743 drive->quirk_list = 0;
744}
745
746static void hpt3xx_maskproc(ide_drive_t *drive, int mask) 724static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
747{ 725{
748 ide_hwif_t *hwif = drive->hwif; 726 ide_hwif_t *hwif = drive->hwif;
749 struct pci_dev *dev = to_pci_dev(hwif->dev); 727 struct pci_dev *dev = to_pci_dev(hwif->dev);
750 struct hpt_info *info = hpt3xx_get_info(hwif->dev); 728 struct hpt_info *info = hpt3xx_get_info(hwif->dev);
751 729
752 if (drive->quirk_list == 0) 730 if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
753 return; 731 return;
754 732
755 if (info->chip_type >= HPT370) { 733 if (info->chip_type >= HPT370) {
@@ -1404,7 +1382,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1404static const struct ide_port_ops hpt3xx_port_ops = { 1382static const struct ide_port_ops hpt3xx_port_ops = {
1405 .set_pio_mode = hpt3xx_set_pio_mode, 1383 .set_pio_mode = hpt3xx_set_pio_mode,
1406 .set_dma_mode = hpt3xx_set_mode, 1384 .set_dma_mode = hpt3xx_set_mode,
1407 .quirkproc = hpt3xx_quirkproc,
1408 .maskproc = hpt3xx_maskproc, 1385 .maskproc = hpt3xx_maskproc,
1409 .mdma_filter = hpt3xx_mdma_filter, 1386 .mdma_filter = hpt3xx_mdma_filter,
1410 .udma_filter = hpt3xx_udma_filter, 1387 .udma_filter = hpt3xx_udma_filter,
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 36da913cc553..5af3d0ffaf0a 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,8 +65,6 @@ static struct cardinfo icside_cardinfo_v6_2 = {
65}; 65};
66 66
67struct icside_state { 67struct icside_state {
68 unsigned int channel;
69 unsigned int enabled;
70 void __iomem *irq_port; 68 void __iomem *irq_port;
71 void __iomem *ioc_base; 69 void __iomem *ioc_base;
72 unsigned int sel; 70 unsigned int sel;
@@ -116,18 +114,11 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
116 struct icside_state *state = ec->irq_data; 114 struct icside_state *state = ec->irq_data;
117 void __iomem *base = state->irq_port; 115 void __iomem *base = state->irq_port;
118 116
119 state->enabled = 1; 117 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
118 readb(base + ICS_ARCIN_V6_INTROFFSET_2);
120 119
121 switch (state->channel) { 120 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
122 case 0: 121 readb(base + ICS_ARCIN_V6_INTROFFSET_1);
123 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
124 readb(base + ICS_ARCIN_V6_INTROFFSET_2);
125 break;
126 case 1:
127 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
128 readb(base + ICS_ARCIN_V6_INTROFFSET_1);
129 break;
130 }
131} 122}
132 123
133/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) 124/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -137,8 +128,6 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
137{ 128{
138 struct icside_state *state = ec->irq_data; 129 struct icside_state *state = ec->irq_data;
139 130
140 state->enabled = 0;
141
142 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); 131 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
143 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); 132 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
144} 133}
@@ -160,44 +149,6 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
160 .irqpending = icside_irqpending_arcin_v6, 149 .irqpending = icside_irqpending_arcin_v6,
161}; 150};
162 151
163/*
164 * Handle routing of interrupts. This is called before
165 * we write the command to the drive.
166 */
167static void icside_maskproc(ide_drive_t *drive, int mask)
168{
169 ide_hwif_t *hwif = drive->hwif;
170 struct expansion_card *ec = ECARD_DEV(hwif->dev);
171 struct icside_state *state = ecard_get_drvdata(ec);
172 unsigned long flags;
173
174 local_irq_save(flags);
175
176 state->channel = hwif->channel;
177
178 if (state->enabled && !mask) {
179 switch (hwif->channel) {
180 case 0:
181 writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
182 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
183 break;
184 case 1:
185 writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
186 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
187 break;
188 }
189 } else {
190 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
191 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
192 }
193
194 local_irq_restore(flags);
195}
196
197static const struct ide_port_ops icside_v6_no_dma_port_ops = {
198 .maskproc = icside_maskproc,
199};
200
201#ifdef CONFIG_BLK_DEV_IDEDMA_ICS 152#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
202/* 153/*
203 * SG-DMA support. 154 * SG-DMA support.
@@ -275,7 +226,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
275 226
276static const struct ide_port_ops icside_v6_port_ops = { 227static const struct ide_port_ops icside_v6_port_ops = {
277 .set_dma_mode = icside_set_dma_mode, 228 .set_dma_mode = icside_set_dma_mode,
278 .maskproc = icside_maskproc,
279}; 229};
280 230
281static void icside_dma_host_set(ide_drive_t *drive, int on) 231static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -320,11 +270,6 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
320 BUG_ON(dma_channel_active(ec->dma)); 270 BUG_ON(dma_channel_active(ec->dma));
321 271
322 /* 272 /*
323 * Ensure that we have the right interrupt routed.
324 */
325 icside_maskproc(drive, 0);
326
327 /*
328 * Route the DMA signals to the correct interface. 273 * Route the DMA signals to the correct interface.
329 */ 274 */
330 writeb(state->sel | hwif->channel, state->ioc_base); 275 writeb(state->sel | hwif->channel, state->ioc_base);
@@ -381,7 +326,7 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
381 return -EOPNOTSUPP; 326 return -EOPNOTSUPP;
382} 327}
383 328
384static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, 329static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
385 struct cardinfo *info, struct expansion_card *ec) 330 struct cardinfo *info, struct expansion_card *ec)
386{ 331{
387 unsigned long port = (unsigned long)base + info->dataoffset; 332 unsigned long port = (unsigned long)base + info->dataoffset;
@@ -398,11 +343,11 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
398 343
399 hw->irq = ec->irq; 344 hw->irq = ec->irq;
400 hw->dev = &ec->dev; 345 hw->dev = &ec->dev;
401 hw->chipset = ide_acorn;
402} 346}
403 347
404static const struct ide_port_info icside_v5_port_info = { 348static const struct ide_port_info icside_v5_port_info = {
405 .host_flags = IDE_HFLAG_NO_DMA, 349 .host_flags = IDE_HFLAG_NO_DMA,
350 .chipset = ide_acorn,
406}; 351};
407 352
408static int __devinit 353static int __devinit
@@ -410,7 +355,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
410{ 355{
411 void __iomem *base; 356 void __iomem *base;
412 struct ide_host *host; 357 struct ide_host *host;
413 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 358 struct ide_hw hw, *hws[] = { &hw };
414 int ret; 359 int ret;
415 360
416 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); 361 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
@@ -431,7 +376,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
431 376
432 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); 377 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
433 378
434 host = ide_host_alloc(&icside_v5_port_info, hws); 379 host = ide_host_alloc(&icside_v5_port_info, hws, 1);
435 if (host == NULL) 380 if (host == NULL)
436 return -ENODEV; 381 return -ENODEV;
437 382
@@ -452,11 +397,11 @@ err_free:
452 397
453static const struct ide_port_info icside_v6_port_info __initdata = { 398static const struct ide_port_info icside_v6_port_info __initdata = {
454 .init_dma = icside_dma_off_init, 399 .init_dma = icside_dma_off_init,
455 .port_ops = &icside_v6_no_dma_port_ops,
456 .dma_ops = &icside_v6_dma_ops, 400 .dma_ops = &icside_v6_dma_ops,
457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 401 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
458 .mwdma_mask = ATA_MWDMA2, 402 .mwdma_mask = ATA_MWDMA2,
459 .swdma_mask = ATA_SWDMA2, 403 .swdma_mask = ATA_SWDMA2,
404 .chipset = ide_acorn,
460}; 405};
461 406
462static int __devinit 407static int __devinit
@@ -466,7 +411,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
466 struct ide_host *host; 411 struct ide_host *host;
467 unsigned int sel = 0; 412 unsigned int sel = 0;
468 int ret; 413 int ret;
469 hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL }; 414 struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
470 struct ide_port_info d = icside_v6_port_info; 415 struct ide_port_info d = icside_v6_port_info;
471 416
472 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); 417 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
@@ -506,7 +451,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
506 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); 451 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
507 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); 452 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
508 453
509 host = ide_host_alloc(&d, hws); 454 host = ide_host_alloc(&d, hws, 2);
510 if (host == NULL) 455 if (host == NULL)
511 return -ENODEV; 456 return -ENODEV;
512 457
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
index 78aca75a2c48..979d342c338a 100644
--- a/drivers/ide/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
@@ -25,12 +25,13 @@ static const struct ide_port_info ide_4drives_port_info = {
25 .port_ops = &ide_4drives_port_ops, 25 .port_ops = &ide_4drives_port_ops,
26 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA | 26 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
27 IDE_HFLAG_4DRIVES, 27 IDE_HFLAG_4DRIVES,
28 .chipset = ide_4drives,
28}; 29};
29 30
30static int __init ide_4drives_init(void) 31static int __init ide_4drives_init(void)
31{ 32{
32 unsigned long base = 0x1f0, ctl = 0x3f6; 33 unsigned long base = 0x1f0, ctl = 0x3f6;
33 hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL }; 34 struct ide_hw hw, *hws[] = { &hw, &hw };
34 35
35 if (probe_4drives == 0) 36 if (probe_4drives == 0)
36 return -ENODEV; 37 return -ENODEV;
@@ -52,9 +53,8 @@ static int __init ide_4drives_init(void)
52 53
53 ide_std_init_ports(&hw, base, ctl); 54 ide_std_init_ports(&hw, base, ctl);
54 hw.irq = 14; 55 hw.irq = 14;
55 hw.chipset = ide_4drives;
56 56
57 return ide_host_add(&ide_4drives_port_info, hws, NULL); 57 return ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
58} 58}
59 59
60module_init(ide_4drives_init); 60module_init(ide_4drives_init);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 757e5956b132..bbdd2547f12a 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -259,7 +259,7 @@ void ide_retry_pc(ide_drive_t *drive)
259 pc->req_xfer = blk_rq_bytes(sense_rq); 259 pc->req_xfer = blk_rq_bytes(sense_rq);
260 260
261 if (drive->media == ide_tape) 261 if (drive->media == ide_tape)
262 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 262 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
263 263
264 /* 264 /*
265 * Push back the failed request and put request sense on top 265 * Push back the failed request and put request sense on top
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 9e47f3529d55..527908ff298c 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -155,6 +155,7 @@ static const struct ide_port_info idecs_port_info = {
155 .port_ops = &idecs_port_ops, 155 .port_ops = &idecs_port_ops,
156 .host_flags = IDE_HFLAG_NO_DMA, 156 .host_flags = IDE_HFLAG_NO_DMA,
157 .irq_flags = IRQF_SHARED, 157 .irq_flags = IRQF_SHARED,
158 .chipset = ide_pci,
158}; 159};
159 160
160static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, 161static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
@@ -163,7 +164,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
163 struct ide_host *host; 164 struct ide_host *host;
164 ide_hwif_t *hwif; 165 ide_hwif_t *hwif;
165 int i, rc; 166 int i, rc;
166 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 167 struct ide_hw hw, *hws[] = { &hw };
167 168
168 if (!request_region(io, 8, DRV_NAME)) { 169 if (!request_region(io, 8, DRV_NAME)) {
169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 170 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -181,10 +182,9 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
181 memset(&hw, 0, sizeof(hw)); 182 memset(&hw, 0, sizeof(hw));
182 ide_std_init_ports(&hw, io, ctl); 183 ide_std_init_ports(&hw, io, ctl);
183 hw.irq = irq; 184 hw.irq = irq;
184 hw.chipset = ide_pci;
185 hw.dev = &handle->dev; 185 hw.dev = &handle->dev;
186 186
187 rc = ide_host_add(&idecs_port_info, hws, &host); 187 rc = ide_host_add(&idecs_port_info, hws, 1, &host);
188 if (rc) 188 if (rc)
189 goto out_release; 189 goto out_release;
190 190
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c6f7fcfb9d67..6a1de2169709 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -302,14 +302,12 @@ static const struct drive_list_entry hpa_list[] = {
302 { NULL, NULL } 302 { NULL, NULL }
303}; 303};
304 304
305static void idedisk_check_hpa(ide_drive_t *drive) 305static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
306{ 306{
307 unsigned long long capacity, set_max; 307 u64 capacity, set_max;
308 int lba48 = ata_id_lba48_enabled(drive->id);
309 308
310 capacity = drive->capacity64; 309 capacity = drive->capacity64;
311 310 set_max = idedisk_read_native_max_address(drive, lba48);
312 set_max = idedisk_read_native_max_address(drive, lba48);
313 311
314 if (ide_in_drive_list(drive->id, hpa_list)) { 312 if (ide_in_drive_list(drive->id, hpa_list)) {
315 /* 313 /*
@@ -320,9 +318,31 @@ static void idedisk_check_hpa(ide_drive_t *drive)
320 set_max--; 318 set_max--;
321 } 319 }
322 320
321 return set_max;
322}
323
324static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
325{
326 set_max = idedisk_set_max_address(drive, set_max, lba48);
327 if (set_max)
328 drive->capacity64 = set_max;
329
330 return set_max;
331}
332
333static void idedisk_check_hpa(ide_drive_t *drive)
334{
335 u64 capacity, set_max;
336 int lba48 = ata_id_lba48_enabled(drive->id);
337
338 capacity = drive->capacity64;
339 set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
340
323 if (set_max <= capacity) 341 if (set_max <= capacity)
324 return; 342 return;
325 343
344 drive->probed_capacity = set_max;
345
326 printk(KERN_INFO "%s: Host Protected Area detected.\n" 346 printk(KERN_INFO "%s: Host Protected Area detected.\n"
327 "\tcurrent capacity is %llu sectors (%llu MB)\n" 347 "\tcurrent capacity is %llu sectors (%llu MB)\n"
328 "\tnative capacity is %llu sectors (%llu MB)\n", 348 "\tnative capacity is %llu sectors (%llu MB)\n",
@@ -330,13 +350,13 @@ static void idedisk_check_hpa(ide_drive_t *drive)
330 capacity, sectors_to_MB(capacity), 350 capacity, sectors_to_MB(capacity),
331 set_max, sectors_to_MB(set_max)); 351 set_max, sectors_to_MB(set_max));
332 352
333 set_max = idedisk_set_max_address(drive, set_max, lba48); 353 if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
354 return;
334 355
335 if (set_max) { 356 set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
336 drive->capacity64 = set_max; 357 if (set_max)
337 printk(KERN_INFO "%s: Host Protected Area disabled.\n", 358 printk(KERN_INFO "%s: Host Protected Area disabled.\n",
338 drive->name); 359 drive->name);
339 }
340} 360}
341 361
342static int ide_disk_get_capacity(ide_drive_t *drive) 362static int ide_disk_get_capacity(ide_drive_t *drive)
@@ -358,6 +378,8 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
358 drive->capacity64 = drive->cyl * drive->head * drive->sect; 378 drive->capacity64 = drive->cyl * drive->head * drive->sect;
359 } 379 }
360 380
381 drive->probed_capacity = drive->capacity64;
382
361 if (lba) { 383 if (lba) {
362 drive->dev_flags |= IDE_DFLAG_LBA; 384 drive->dev_flags |= IDE_DFLAG_LBA;
363 385
@@ -376,7 +398,7 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
376 "%llu sectors (%llu MB)\n", 398 "%llu sectors (%llu MB)\n",
377 drive->name, (unsigned long long)drive->capacity64, 399 drive->name, (unsigned long long)drive->capacity64,
378 sectors_to_MB(drive->capacity64)); 400 sectors_to_MB(drive->capacity64));
379 drive->capacity64 = 1ULL << 28; 401 drive->probed_capacity = drive->capacity64 = 1ULL << 28;
380 } 402 }
381 403
382 if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && 404 if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
@@ -392,6 +414,34 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
392 return 0; 414 return 0;
393} 415}
394 416
417static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
418{
419 u64 set = min(capacity, drive->probed_capacity);
420 u16 *id = drive->id;
421 int lba48 = ata_id_lba48_enabled(id);
422
423 if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
424 ata_id_hpa_enabled(id) == 0)
425 goto out;
426
427 /*
428 * according to the spec the SET MAX ADDRESS command shall be
429 * immediately preceded by a READ NATIVE MAX ADDRESS command
430 */
431 capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
432 if (capacity == 0)
433 goto out;
434
435 set = ide_disk_hpa_set_capacity(drive, set, lba48);
436 if (set) {
437 /* needed for ->resume to disable HPA */
438 drive->dev_flags |= IDE_DFLAG_NOHPA;
439 return set;
440 }
441out:
442 return drive->capacity64;
443}
444
395static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) 445static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
396{ 446{
397 ide_drive_t *drive = q->queuedata; 447 ide_drive_t *drive = q->queuedata;
@@ -428,14 +478,14 @@ static int set_multcount(ide_drive_t *drive, int arg)
428 if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff)) 478 if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
429 return -EINVAL; 479 return -EINVAL;
430 480
431 if (drive->special.b.set_multmode) 481 if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
432 return -EBUSY; 482 return -EBUSY;
433 483
434 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 484 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
435 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 485 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
436 486
437 drive->mult_req = arg; 487 drive->mult_req = arg;
438 drive->special.b.set_multmode = 1; 488 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
439 error = blk_execute_rq(drive->queue, NULL, rq, 0); 489 error = blk_execute_rq(drive->queue, NULL, rq, 0);
440 blk_put_request(rq); 490 blk_put_request(rq);
441 491
@@ -740,6 +790,7 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
740 790
741const struct ide_disk_ops ide_ata_disk_ops = { 791const struct ide_disk_ops ide_ata_disk_ops = {
742 .check = ide_disk_check, 792 .check = ide_disk_check,
793 .set_capacity = ide_disk_set_capacity,
743 .get_capacity = ide_disk_get_capacity, 794 .get_capacity = ide_disk_get_capacity,
744 .setup = ide_disk_setup, 795 .setup = ide_disk_setup,
745 .flush = ide_disk_flush, 796 .flush = ide_disk_flush,
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 001f68f0bb28..219e6fb78dc6 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -347,7 +347,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
347 347
348 return mode; 348 return mode;
349} 349}
350EXPORT_SYMBOL_GPL(ide_find_dma_mode);
351 350
352static int ide_tune_dma(ide_drive_t *drive) 351static int ide_tune_dma(ide_drive_t *drive)
353{ 352{
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 5d5fb961b5ce..2b9141979613 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -52,7 +52,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
52 } 52 }
53 53
54 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 54 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
55 drive->special.b.recalibrate = 1; 55 drive->special_flags |= IDE_SFLAG_RECALIBRATE;
56 56
57 ++rq->errors; 57 ++rq->errors;
58 58
@@ -268,9 +268,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
268{ 268{
269 int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; 269 int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
270 270
271 drive->special.all = 0; 271 drive->special_flags =
272 drive->special.b.set_geometry = legacy; 272 legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;
273 drive->special.b.recalibrate = legacy;
274 273
275 drive->mult_count = 0; 274 drive->mult_count = 0;
276 drive->dev_flags &= ~IDE_DFLAG_PARKED; 275 drive->dev_flags &= ~IDE_DFLAG_PARKED;
@@ -280,7 +279,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
280 drive->mult_req = 0; 279 drive->mult_req = 0;
281 280
282 if (drive->mult_req != drive->mult_count) 281 if (drive->mult_req != drive->mult_count)
283 drive->special.b.set_multmode = 1; 282 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
284} 283}
285 284
286static void pre_reset(ide_drive_t *drive) 285static void pre_reset(ide_drive_t *drive)
@@ -408,8 +407,9 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
408 /* more than enough time */ 407 /* more than enough time */
409 udelay(10); 408 udelay(10);
410 /* clear SRST, leave nIEN (unless device is on the quirk list) */ 409 /* clear SRST, leave nIEN (unless device is on the quirk list) */
411 tp_ops->write_devctl(hwif, (drive->quirk_list == 2 ? 0 : ATA_NIEN) | 410 tp_ops->write_devctl(hwif,
412 ATA_DEVCTL_OBS); 411 ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
412 ATA_DEVCTL_OBS);
413 /* more than enough time */ 413 /* more than enough time */
414 udelay(10); 414 udelay(10);
415 hwif->poll_timeout = jiffies + WAIT_WORSTCASE; 415 hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 4b6b71e2cdf5..214119026b3f 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -287,6 +287,19 @@ static int ide_gd_media_changed(struct gendisk *disk)
287 return ret; 287 return ret;
288} 288}
289 289
290static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
291 unsigned long long capacity)
292{
293 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
294 ide_drive_t *drive = idkp->drive;
295 const struct ide_disk_ops *disk_ops = drive->disk_ops;
296
297 if (disk_ops->set_capacity)
298 return disk_ops->set_capacity(drive, capacity);
299
300 return drive->capacity64;
301}
302
290static int ide_gd_revalidate_disk(struct gendisk *disk) 303static int ide_gd_revalidate_disk(struct gendisk *disk)
291{ 304{
292 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 305 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -315,6 +328,7 @@ static struct block_device_operations ide_gd_ops = {
315 .locked_ioctl = ide_gd_ioctl, 328 .locked_ioctl = ide_gd_ioctl,
316 .getgeo = ide_gd_getgeo, 329 .getgeo = ide_gd_getgeo,
317 .media_changed = ide_gd_media_changed, 330 .media_changed = ide_gd_media_changed,
331 .set_capacity = ide_gd_set_capacity,
318 .revalidate_disk = ide_gd_revalidate_disk 332 .revalidate_disk = ide_gd_revalidate_disk
319}; 333};
320 334
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 7812ca0be13b..54d7c4685d23 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -29,6 +29,7 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
29 29
30static const struct ide_port_info ide_generic_port_info = { 30static const struct ide_port_info ide_generic_port_info = {
31 .host_flags = IDE_HFLAG_NO_DMA, 31 .host_flags = IDE_HFLAG_NO_DMA,
32 .chipset = ide_generic,
32}; 33};
33 34
34#ifdef CONFIG_ARM 35#ifdef CONFIG_ARM
@@ -85,7 +86,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
85 86
86static int __init ide_generic_init(void) 87static int __init ide_generic_init(void)
87{ 88{
88 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 89 struct ide_hw hw, *hws[] = { &hw };
89 unsigned long io_addr; 90 unsigned long io_addr;
90 int i, rc = 0, primary = 0, secondary = 0; 91 int i, rc = 0, primary = 0, secondary = 0;
91 92
@@ -132,9 +133,7 @@ static int __init ide_generic_init(void)
132#else 133#else
133 hw.irq = legacy_irqs[i]; 134 hw.irq = legacy_irqs[i];
134#endif 135#endif
135 hw.chipset = ide_generic; 136 rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL);
136
137 rc = ide_host_add(&ide_generic_port_info, hws, NULL);
138 if (rc) { 137 if (rc) {
139 release_region(io_addr + 0x206, 1); 138 release_region(io_addr + 0x206, 1);
140 release_region(io_addr, 8); 139 release_region(io_addr, 8);
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
index c06ebdc4a130..520f42c5445a 100644
--- a/drivers/ide/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
@@ -64,26 +64,26 @@ static const struct ide_tp_ops h8300_tp_ops = {
64 64
65#define H8300_IDE_GAP (2) 65#define H8300_IDE_GAP (2)
66 66
67static inline void hw_setup(hw_regs_t *hw) 67static inline void hw_setup(struct ide_hw *hw)
68{ 68{
69 int i; 69 int i;
70 70
71 memset(hw, 0, sizeof(hw_regs_t)); 71 memset(hw, 0, sizeof(*hw));
72 for (i = 0; i <= 7; i++) 72 for (i = 0; i <= 7; i++)
73 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; 73 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
74 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; 74 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
75 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; 75 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
76 hw->chipset = ide_generic;
77} 76}
78 77
79static const struct ide_port_info h8300_port_info = { 78static const struct ide_port_info h8300_port_info = {
80 .tp_ops = &h8300_tp_ops, 79 .tp_ops = &h8300_tp_ops,
81 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, 80 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
81 .chipset = ide_generic,
82}; 82};
83 83
84static int __init h8300_ide_init(void) 84static int __init h8300_ide_init(void)
85{ 85{
86 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 86 struct ide_hw hw, *hws[] = { &hw };
87 87
88 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); 88 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
89 89
@@ -96,7 +96,7 @@ static int __init h8300_ide_init(void)
96 96
97 hw_setup(&hw); 97 hw_setup(&hw);
98 98
99 return ide_host_add(&h8300_port_info, hws, NULL); 99 return ide_host_add(&h8300_port_info, hws, 1, NULL);
100 100
101out_busy: 101out_busy:
102 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); 102 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index bba4297f2f03..272cc38f6dbe 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -184,29 +184,42 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
184 tf->command = ATA_CMD_SET_MULTI; 184 tf->command = ATA_CMD_SET_MULTI;
185} 185}
186 186
187static ide_startstop_t ide_disk_special(ide_drive_t *drive) 187/**
188 * do_special - issue some special commands
189 * @drive: drive the command is for
190 *
191 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
192 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
193 */
194
195static ide_startstop_t do_special(ide_drive_t *drive)
188{ 196{
189 special_t *s = &drive->special;
190 struct ide_cmd cmd; 197 struct ide_cmd cmd;
191 198
199#ifdef DEBUG
200 printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
201 drive->special_flags);
202#endif
203 if (drive->media != ide_disk) {
204 drive->special_flags = 0;
205 drive->mult_req = 0;
206 return ide_stopped;
207 }
208
192 memset(&cmd, 0, sizeof(cmd)); 209 memset(&cmd, 0, sizeof(cmd));
193 cmd.protocol = ATA_PROT_NODATA; 210 cmd.protocol = ATA_PROT_NODATA;
194 211
195 if (s->b.set_geometry) { 212 if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
196 s->b.set_geometry = 0; 213 drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
197 ide_tf_set_specify_cmd(drive, &cmd.tf); 214 ide_tf_set_specify_cmd(drive, &cmd.tf);
198 } else if (s->b.recalibrate) { 215 } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
199 s->b.recalibrate = 0; 216 drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
200 ide_tf_set_restore_cmd(drive, &cmd.tf); 217 ide_tf_set_restore_cmd(drive, &cmd.tf);
201 } else if (s->b.set_multmode) { 218 } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
202 s->b.set_multmode = 0; 219 drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
203 ide_tf_set_setmult_cmd(drive, &cmd.tf); 220 ide_tf_set_setmult_cmd(drive, &cmd.tf);
204 } else if (s->all) { 221 } else
205 int special = s->all; 222 BUG();
206 s->all = 0;
207 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
208 return ide_stopped;
209 }
210 223
211 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 224 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
212 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; 225 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
@@ -217,31 +230,6 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
217 return ide_started; 230 return ide_started;
218} 231}
219 232
220/**
221 * do_special - issue some special commands
222 * @drive: drive the command is for
223 *
224 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
225 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
226 *
227 * It used to do much more, but has been scaled back.
228 */
229
230static ide_startstop_t do_special (ide_drive_t *drive)
231{
232 special_t *s = &drive->special;
233
234#ifdef DEBUG
235 printk("%s: do_special: 0x%02x\n", drive->name, s->all);
236#endif
237 if (drive->media == ide_disk)
238 return ide_disk_special(drive);
239
240 s->all = 0;
241 drive->mult_req = 0;
242 return ide_stopped;
243}
244
245void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) 233void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
246{ 234{
247 ide_hwif_t *hwif = drive->hwif; 235 ide_hwif_t *hwif = drive->hwif;
@@ -351,7 +339,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
351 printk(KERN_ERR "%s: drive not ready for command\n", drive->name); 339 printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
352 return startstop; 340 return startstop;
353 } 341 }
354 if (!drive->special.all) { 342
343 if (drive->special_flags == 0) {
355 struct ide_driver *drv; 344 struct ide_driver *drv;
356 345
357 /* 346 /*
@@ -499,11 +488,15 @@ repeat:
499 488
500 if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && 489 if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
501 hwif != prev_port) { 490 hwif != prev_port) {
491 ide_drive_t *cur_dev =
492 prev_port ? prev_port->cur_dev : NULL;
493
502 /* 494 /*
503 * set nIEN for previous port, drives in the 495 * set nIEN for previous port, drives in the
504 * quirk_list may not like intr setups/cleanups 496 * quirk list may not like intr setups/cleanups
505 */ 497 */
506 if (prev_port && prev_port->cur_dev->quirk_list == 0) 498 if (cur_dev &&
499 (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
507 prev_port->tp_ops->write_devctl(prev_port, 500 prev_port->tp_ops->write_devctl(prev_port,
508 ATA_NIEN | 501 ATA_NIEN |
509 ATA_DEVCTL_OBS); 502 ATA_DEVCTL_OBS);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 06fe002116ec..fa047150a1c6 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -282,6 +282,29 @@ no_80w:
282 return 0; 282 return 0;
283} 283}
284 284
285static const char *nien_quirk_list[] = {
286 "QUANTUM FIREBALLlct08 08",
287 "QUANTUM FIREBALLP KA6.4",
288 "QUANTUM FIREBALLP KA9.1",
289 "QUANTUM FIREBALLP KX13.6",
290 "QUANTUM FIREBALLP KX20.5",
291 "QUANTUM FIREBALLP KX27.3",
292 "QUANTUM FIREBALLP LM20.4",
293 "QUANTUM FIREBALLP LM20.5",
294 NULL
295};
296
297void ide_check_nien_quirk_list(ide_drive_t *drive)
298{
299 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
300
301 for (list = nien_quirk_list; *list != NULL; list++)
302 if (strstr(m, *list) != NULL) {
303 drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
304 return;
305 }
306}
307
285int ide_driveid_update(ide_drive_t *drive) 308int ide_driveid_update(ide_drive_t *drive)
286{ 309{
287 u16 *id; 310 u16 *id;
@@ -311,7 +334,6 @@ int ide_driveid_update(ide_drive_t *drive)
311 334
312 return 1; 335 return 1;
313out_err: 336out_err:
314 SELECT_MASK(drive, 0);
315 if (rc == 2) 337 if (rc == 2)
316 printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); 338 printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
317 kfree(id); 339 kfree(id);
@@ -365,7 +387,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
365 387
366 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); 388 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
367 389
368 if (drive->quirk_list == 2) 390 if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
369 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 391 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
370 392
371 error = __ide_wait_stat(drive, drive->ready_stat, 393 error = __ide_wait_stat(drive, drive->ready_stat,
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
index 8c5dcbf22547..b9654a7bb7be 100644
--- a/drivers/ide/ide-legacy.c
+++ b/drivers/ide/ide-legacy.c
@@ -1,7 +1,7 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/ide.h> 2#include <linux/ide.h>
3 3
4static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, 4static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
5 u8 port_no, const struct ide_port_info *d, 5 u8 port_no, const struct ide_port_info *d,
6 unsigned long config) 6 unsigned long config)
7{ 7{
@@ -33,7 +33,6 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
33 33
34 ide_std_init_ports(hw, base, ctl); 34 ide_std_init_ports(hw, base, ctl);
35 hw->irq = irq; 35 hw->irq = irq;
36 hw->chipset = d->chipset;
37 hw->config = config; 36 hw->config = config;
38 37
39 hws[port_no] = hw; 38 hws[port_no] = hw;
@@ -41,7 +40,7 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
41 40
42int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) 41int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
43{ 42{
44 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; 43 struct ide_hw hw[2], *hws[] = { NULL, NULL };
45 44
46 memset(&hw, 0, sizeof(hw)); 45 memset(&hw, 0, sizeof(hw));
47 46
@@ -53,6 +52,6 @@ int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
53 (d->host_flags & IDE_HFLAG_SINGLE)) 52 (d->host_flags & IDE_HFLAG_SINGLE))
54 return -ENOENT; 53 return -ENOENT;
55 54
56 return ide_host_add(d, hws, NULL); 55 return ide_host_add(d, hws, 2, NULL);
57} 56}
58EXPORT_SYMBOL_GPL(ide_legacy_device_add); 57EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 6e80b774e88a..017b1df3b805 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,6 +29,7 @@ static struct pnp_device_id idepnp_devices[] = {
29 29
30static const struct ide_port_info ide_pnp_port_info = { 30static const struct ide_port_info ide_pnp_port_info = {
31 .host_flags = IDE_HFLAG_NO_DMA, 31 .host_flags = IDE_HFLAG_NO_DMA,
32 .chipset = ide_generic,
32}; 33};
33 34
34static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) 35static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
@@ -36,7 +37,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
36 struct ide_host *host; 37 struct ide_host *host;
37 unsigned long base, ctl; 38 unsigned long base, ctl;
38 int rc; 39 int rc;
39 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 40 struct ide_hw hw, *hws[] = { &hw };
40 41
41 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); 42 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
42 43
@@ -62,9 +63,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
62 memset(&hw, 0, sizeof(hw)); 63 memset(&hw, 0, sizeof(hw));
63 ide_std_init_ports(&hw, base, ctl); 64 ide_std_init_ports(&hw, base, ctl);
64 hw.irq = pnp_irq(dev, 0); 65 hw.irq = pnp_irq(dev, 0);
65 hw.chipset = ide_generic;
66 66
67 rc = ide_host_add(&ide_pnp_port_info, hws, &host); 67 rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
68 if (rc) 68 if (rc)
69 goto out; 69 goto out;
70 70
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index c895ed52b2e8..f371b0de314f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -97,7 +97,7 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
97 drive->mult_req = id[ATA_ID_MULTSECT] & 0xff; 97 drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
98 98
99 if (drive->mult_req) 99 if (drive->mult_req)
100 drive->special.b.set_multmode = 1; 100 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
101 } 101 }
102} 102}
103 103
@@ -465,23 +465,8 @@ static u8 probe_for_drive(ide_drive_t *drive)
465 int rc; 465 int rc;
466 u8 cmd; 466 u8 cmd;
467 467
468 /*
469 * In order to keep things simple we have an id
470 * block for all drives at all times. If the device
471 * is pre ATA or refuses ATA/ATAPI identify we
472 * will add faked data to this.
473 *
474 * Also note that 0 everywhere means "can't do X"
475 */
476
477 drive->dev_flags &= ~IDE_DFLAG_ID_READ; 468 drive->dev_flags &= ~IDE_DFLAG_ID_READ;
478 469
479 drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
480 if (drive->id == NULL) {
481 printk(KERN_ERR "ide: out of memory for id data.\n");
482 return 0;
483 }
484
485 m = (char *)&drive->id[ATA_ID_PROD]; 470 m = (char *)&drive->id[ATA_ID_PROD];
486 strcpy(m, "UNKNOWN"); 471 strcpy(m, "UNKNOWN");
487 472
@@ -497,7 +482,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
497 } 482 }
498 483
499 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 484 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
500 goto out_free; 485 return 0;
501 486
502 /* identification failed? */ 487 /* identification failed? */
503 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { 488 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -521,7 +506,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
521 } 506 }
522 507
523 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 508 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
524 goto out_free; 509 return 0;
525 510
526 /* The drive wasn't being helpful. Add generic info only */ 511 /* The drive wasn't being helpful. Add generic info only */
527 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { 512 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -535,9 +520,6 @@ static u8 probe_for_drive(ide_drive_t *drive)
535 } 520 }
536 521
537 return 1; 522 return 1;
538out_free:
539 kfree(drive->id);
540 return 0;
541} 523}
542 524
543static void hwif_release_dev(struct device *dev) 525static void hwif_release_dev(struct device *dev)
@@ -702,8 +684,14 @@ static int ide_probe_port(ide_hwif_t *hwif)
702 if (irqd) 684 if (irqd)
703 disable_irq(hwif->irq); 685 disable_irq(hwif->irq);
704 686
705 if (ide_port_wait_ready(hwif) == -EBUSY) 687 rc = ide_port_wait_ready(hwif);
706 printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); 688 if (rc == -ENODEV) {
689 printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
690 goto out;
691 } else if (rc == -EBUSY)
692 printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
693 else
694 rc = -ENODEV;
707 695
708 /* 696 /*
709 * Second drive should only exist if first drive was found, 697 * Second drive should only exist if first drive was found,
@@ -714,7 +702,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
714 if (drive->dev_flags & IDE_DFLAG_PRESENT) 702 if (drive->dev_flags & IDE_DFLAG_PRESENT)
715 rc = 0; 703 rc = 0;
716 } 704 }
717 705out:
718 /* 706 /*
719 * Use cached IRQ number. It might be (and is...) changed by probe 707 * Use cached IRQ number. It might be (and is...) changed by probe
720 * code above 708 * code above
@@ -732,6 +720,8 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
732 int i; 720 int i;
733 721
734 ide_port_for_each_present_dev(i, drive, hwif) { 722 ide_port_for_each_present_dev(i, drive, hwif) {
723 ide_check_nien_quirk_list(drive);
724
735 if (port_ops && port_ops->quirkproc) 725 if (port_ops && port_ops->quirkproc)
736 port_ops->quirkproc(drive); 726 port_ops->quirkproc(drive);
737 } 727 }
@@ -817,8 +807,6 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
817 if (ide_init_queue(drive)) { 807 if (ide_init_queue(drive)) {
818 printk(KERN_ERR "ide: failed to init %s\n", 808 printk(KERN_ERR "ide: failed to init %s\n",
819 drive->name); 809 drive->name);
820 kfree(drive->id);
821 drive->id = NULL;
822 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 810 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
823 continue; 811 continue;
824 } 812 }
@@ -947,9 +935,6 @@ static void drive_release_dev (struct device *dev)
947 blk_cleanup_queue(drive->queue); 935 blk_cleanup_queue(drive->queue);
948 drive->queue = NULL; 936 drive->queue = NULL;
949 937
950 kfree(drive->id);
951 drive->id = NULL;
952
953 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 938 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
954 939
955 complete(&drive->gendev_rel_comp); 940 complete(&drive->gendev_rel_comp);
@@ -1035,6 +1020,15 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1035 if (port_ops && port_ops->init_dev) 1020 if (port_ops && port_ops->init_dev)
1036 port_ops->init_dev(drive); 1021 port_ops->init_dev(drive);
1037 } 1022 }
1023
1024 ide_port_for_each_dev(i, drive, hwif) {
1025 /*
1026 * default to PIO Mode 0 before we figure out
1027 * the most suited mode for the attached device
1028 */
1029 if (port_ops && port_ops->set_pio_mode)
1030 port_ops->set_pio_mode(drive, 0);
1031 }
1038} 1032}
1039 1033
1040static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1034static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1042,8 +1036,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1042{ 1036{
1043 hwif->channel = port; 1037 hwif->channel = port;
1044 1038
1045 if (d->chipset) 1039 hwif->chipset = d->chipset ? d->chipset : ide_pci;
1046 hwif->chipset = d->chipset;
1047 1040
1048 if (d->init_iops) 1041 if (d->init_iops)
1049 d->init_iops(hwif); 1042 d->init_iops(hwif);
@@ -1124,16 +1117,19 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
1124 1117
1125 ide_port_for_each_dev(i, drive, hwif) { 1118 ide_port_for_each_dev(i, drive, hwif) {
1126 u8 j = (hwif->index * MAX_DRIVES) + i; 1119 u8 j = (hwif->index * MAX_DRIVES) + i;
1120 u16 *saved_id = drive->id;
1127 1121
1128 memset(drive, 0, sizeof(*drive)); 1122 memset(drive, 0, sizeof(*drive));
1123 memset(saved_id, 0, SECTOR_SIZE);
1124 drive->id = saved_id;
1129 1125
1130 drive->media = ide_disk; 1126 drive->media = ide_disk;
1131 drive->select = (i << 4) | ATA_DEVICE_OBS; 1127 drive->select = (i << 4) | ATA_DEVICE_OBS;
1132 drive->hwif = hwif; 1128 drive->hwif = hwif;
1133 drive->ready_stat = ATA_DRDY; 1129 drive->ready_stat = ATA_DRDY;
1134 drive->bad_wstat = BAD_W_STAT; 1130 drive->bad_wstat = BAD_W_STAT;
1135 drive->special.b.recalibrate = 1; 1131 drive->special_flags = IDE_SFLAG_RECALIBRATE |
1136 drive->special.b.set_geometry = 1; 1132 IDE_SFLAG_SET_GEOMETRY;
1137 drive->name[0] = 'h'; 1133 drive->name[0] = 'h';
1138 drive->name[1] = 'd'; 1134 drive->name[1] = 'd';
1139 drive->name[2] = 'a' + j; 1135 drive->name[2] = 'a' + j;
@@ -1168,11 +1164,10 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
1168 ide_port_init_devices_data(hwif); 1164 ide_port_init_devices_data(hwif);
1169} 1165}
1170 1166
1171static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 1167static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
1172{ 1168{
1173 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 1169 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
1174 hwif->irq = hw->irq; 1170 hwif->irq = hw->irq;
1175 hwif->chipset = hw->chipset;
1176 hwif->dev = hw->dev; 1171 hwif->dev = hw->dev;
1177 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; 1172 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
1178 hwif->ack_intr = hw->ack_intr; 1173 hwif->ack_intr = hw->ack_intr;
@@ -1233,8 +1228,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
1233 ide_drive_t *drive; 1228 ide_drive_t *drive;
1234 int i; 1229 int i;
1235 1230
1236 ide_port_for_each_dev(i, drive, hwif) 1231 ide_port_for_each_dev(i, drive, hwif) {
1232 kfree(drive->id);
1237 kfree(drive); 1233 kfree(drive);
1234 }
1238} 1235}
1239 1236
1240static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) 1237static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
@@ -1248,6 +1245,18 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
1248 if (drive == NULL) 1245 if (drive == NULL)
1249 goto out_nomem; 1246 goto out_nomem;
1250 1247
1248 /*
1249 * In order to keep things simple we have an id
1250 * block for all drives at all times. If the device
1251 * is pre ATA or refuses ATA/ATAPI identify we
1252 * will add faked data to this.
1253 *
1254 * Also note that 0 everywhere means "can't do X"
1255 */
1256 drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
1257 if (drive->id == NULL)
1258 goto out_nomem;
1259
1251 hwif->devices[i] = drive; 1260 hwif->devices[i] = drive;
1252 } 1261 }
1253 return 0; 1262 return 0;
@@ -1257,7 +1266,8 @@ out_nomem:
1257 return -ENOMEM; 1266 return -ENOMEM;
1258} 1267}
1259 1268
1260struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) 1269struct ide_host *ide_host_alloc(const struct ide_port_info *d,
1270 struct ide_hw **hws, unsigned int n_ports)
1261{ 1271{
1262 struct ide_host *host; 1272 struct ide_host *host;
1263 struct device *dev = hws[0] ? hws[0]->dev : NULL; 1273 struct device *dev = hws[0] ? hws[0]->dev : NULL;
@@ -1268,7 +1278,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1268 if (host == NULL) 1278 if (host == NULL)
1269 return NULL; 1279 return NULL;
1270 1280
1271 for (i = 0; i < MAX_HOST_PORTS; i++) { 1281 for (i = 0; i < n_ports; i++) {
1272 ide_hwif_t *hwif; 1282 ide_hwif_t *hwif;
1273 int idx; 1283 int idx;
1274 1284
@@ -1288,6 +1298,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1288 if (idx < 0) { 1298 if (idx < 0) {
1289 printk(KERN_ERR "%s: no free slot for interface\n", 1299 printk(KERN_ERR "%s: no free slot for interface\n",
1290 d ? d->name : "ide"); 1300 d ? d->name : "ide");
1301 ide_port_free_devices(hwif);
1291 kfree(hwif); 1302 kfree(hwif);
1292 continue; 1303 continue;
1293 } 1304 }
@@ -1344,7 +1355,7 @@ static void ide_disable_port(ide_hwif_t *hwif)
1344} 1355}
1345 1356
1346int ide_host_register(struct ide_host *host, const struct ide_port_info *d, 1357int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1347 hw_regs_t **hws) 1358 struct ide_hw **hws)
1348{ 1359{
1349 ide_hwif_t *hwif, *mate = NULL; 1360 ide_hwif_t *hwif, *mate = NULL;
1350 int i, j = 0; 1361 int i, j = 0;
@@ -1438,13 +1449,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1438} 1449}
1439EXPORT_SYMBOL_GPL(ide_host_register); 1450EXPORT_SYMBOL_GPL(ide_host_register);
1440 1451
1441int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, 1452int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
1442 struct ide_host **hostp) 1453 unsigned int n_ports, struct ide_host **hostp)
1443{ 1454{
1444 struct ide_host *host; 1455 struct ide_host *host;
1445 int rc; 1456 int rc;
1446 1457
1447 host = ide_host_alloc(d, hws); 1458 host = ide_host_alloc(d, hws, n_ports);
1448 if (host == NULL) 1459 if (host == NULL)
1449 return -ENOMEM; 1460 return -ENOMEM;
1450 1461
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index d9764f0bc82f..4b447a8a49d4 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -240,18 +240,27 @@ static struct class *idetape_sysfs_class;
240 240
241static void ide_tape_release(struct device *); 241static void ide_tape_release(struct device *);
242 242
243static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) 243static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
244
245static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
246 unsigned int i)
244{ 247{
245 struct ide_tape_obj *tape = NULL; 248 struct ide_tape_obj *tape = NULL;
246 249
247 mutex_lock(&idetape_ref_mutex); 250 mutex_lock(&idetape_ref_mutex);
248 tape = ide_drv_g(disk, ide_tape_obj); 251
252 if (cdev)
253 tape = idetape_devs[i];
254 else
255 tape = ide_drv_g(disk, ide_tape_obj);
256
249 if (tape) { 257 if (tape) {
250 if (ide_device_get(tape->drive)) 258 if (ide_device_get(tape->drive))
251 tape = NULL; 259 tape = NULL;
252 else 260 else
253 get_device(&tape->dev); 261 get_device(&tape->dev);
254 } 262 }
263
255 mutex_unlock(&idetape_ref_mutex); 264 mutex_unlock(&idetape_ref_mutex);
256 return tape; 265 return tape;
257} 266}
@@ -267,24 +276,6 @@ static void ide_tape_put(struct ide_tape_obj *tape)
267} 276}
268 277
269/* 278/*
270 * The variables below are used for the character device interface. Additional
271 * state variables are defined in our ide_drive_t structure.
272 */
273static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
274
275static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
276{
277 struct ide_tape_obj *tape = NULL;
278
279 mutex_lock(&idetape_ref_mutex);
280 tape = idetape_devs[i];
281 if (tape)
282 get_device(&tape->dev);
283 mutex_unlock(&idetape_ref_mutex);
284 return tape;
285}
286
287/*
288 * called on each failed packet command retry to analyze the request sense. We 279 * called on each failed packet command retry to analyze the request sense. We
289 * currently do not utilize this information. 280 * currently do not utilize this information.
290 */ 281 */
@@ -397,7 +388,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
397 if (readpos[0] & 0x4) { 388 if (readpos[0] & 0x4) {
398 printk(KERN_INFO "ide-tape: Block location is unknown" 389 printk(KERN_INFO "ide-tape: Block location is unknown"
399 "to the tape\n"); 390 "to the tape\n");
400 clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); 391 clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
392 &drive->atapi_flags);
401 uptodate = 0; 393 uptodate = 0;
402 err = IDE_DRV_ERROR_GENERAL; 394 err = IDE_DRV_ERROR_GENERAL;
403 } else { 395 } else {
@@ -406,7 +398,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
406 398
407 tape->partition = readpos[1]; 399 tape->partition = readpos[1];
408 tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]); 400 tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]);
409 set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); 401 set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
402 &drive->atapi_flags);
410 } 403 }
411 } 404 }
412 405
@@ -656,15 +649,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
656 649
657 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && 650 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
658 (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) 651 (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
659 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 652 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
660 653
661 if (drive->dev_flags & IDE_DFLAG_POST_RESET) { 654 if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
662 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 655 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
663 drive->dev_flags &= ~IDE_DFLAG_POST_RESET; 656 drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
664 } 657 }
665 658
666 if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) && 659 if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
667 (stat & ATA_DSC) == 0) { 660 !(stat & ATA_DSC)) {
668 if (postponed_rq == NULL) { 661 if (postponed_rq == NULL) {
669 tape->dsc_polling_start = jiffies; 662 tape->dsc_polling_start = jiffies;
670 tape->dsc_poll_freq = tape->best_dsc_rw_freq; 663 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
@@ -684,7 +677,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
684 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW; 677 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
685 idetape_postpone_request(drive); 678 idetape_postpone_request(drive);
686 return ide_stopped; 679 return ide_stopped;
687 } 680 } else
681 drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
682
688 if (rq->cmd[13] & REQ_IDETAPE_READ) { 683 if (rq->cmd[13] & REQ_IDETAPE_READ) {
689 pc = &tape->queued_pc; 684 pc = &tape->queued_pc;
690 ide_tape_create_rw_cmd(tape, pc, rq, READ_6); 685 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
@@ -744,7 +739,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
744 int load_attempted = 0; 739 int load_attempted = 0;
745 740
746 /* Wait for the tape to become ready */ 741 /* Wait for the tape to become ready */
747 set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); 742 set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
748 timeout += jiffies; 743 timeout += jiffies;
749 while (time_before(jiffies, timeout)) { 744 while (time_before(jiffies, timeout)) {
750 if (ide_do_test_unit_ready(drive, disk) == 0) 745 if (ide_do_test_unit_ready(drive, disk) == 0)
@@ -820,7 +815,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
820 if (tape->chrdev_dir != IDETAPE_DIR_READ) 815 if (tape->chrdev_dir != IDETAPE_DIR_READ)
821 return; 816 return;
822 817
823 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); 818 clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
824 tape->valid = 0; 819 tape->valid = 0;
825 if (tape->buf != NULL) { 820 if (tape->buf != NULL) {
826 kfree(tape->buf); 821 kfree(tape->buf);
@@ -1113,7 +1108,8 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1113 1108
1114 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1109 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1115 tape->valid = 0; 1110 tape->valid = 0;
1116 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1111 if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
1112 &drive->atapi_flags))
1117 ++count; 1113 ++count;
1118 ide_tape_discard_merge_buffer(drive, 0); 1114 ide_tape_discard_merge_buffer(drive, 0);
1119 } 1115 }
@@ -1168,7 +1164,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1168 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1164 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1169 1165
1170 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1166 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1171 if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags)) 1167 if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
1172 if (count > tape->blk_size && 1168 if (count > tape->blk_size &&
1173 (count % tape->blk_size) == 0) 1169 (count % tape->blk_size) == 0)
1174 tape->user_bs_factor = count / tape->blk_size; 1170 tape->user_bs_factor = count / tape->blk_size;
@@ -1184,7 +1180,8 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1184 /* refill if staging buffer is empty */ 1180 /* refill if staging buffer is empty */
1185 if (!tape->valid) { 1181 if (!tape->valid) {
1186 /* If we are at a filemark, nothing more to read */ 1182 /* If we are at a filemark, nothing more to read */
1187 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1183 if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
1184 &drive->atapi_flags))
1188 break; 1185 break;
1189 /* read */ 1186 /* read */
1190 if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, 1187 if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
@@ -1202,7 +1199,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1202 done += todo; 1199 done += todo;
1203 } 1200 }
1204 1201
1205 if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { 1202 if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
1206 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1203 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1207 1204
1208 idetape_space_over_filemarks(drive, MTFSF, 1); 1205 idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -1336,7 +1333,8 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
1336 ide_tape_discard_merge_buffer(drive, 0); 1333 ide_tape_discard_merge_buffer(drive, 0);
1337 retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK); 1334 retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
1338 if (!retval) 1335 if (!retval)
1339 clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); 1336 clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
1337 &drive->atapi_flags);
1340 return retval; 1338 return retval;
1341 case MTNOP: 1339 case MTNOP:
1342 ide_tape_discard_merge_buffer(drive, 0); 1340 ide_tape_discard_merge_buffer(drive, 0);
@@ -1358,9 +1356,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
1358 mt_count % tape->blk_size) 1356 mt_count % tape->blk_size)
1359 return -EIO; 1357 return -EIO;
1360 tape->user_bs_factor = mt_count / tape->blk_size; 1358 tape->user_bs_factor = mt_count / tape->blk_size;
1361 clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); 1359 clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
1360 &drive->atapi_flags);
1362 } else 1361 } else
1363 set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); 1362 set_bit(ilog2(IDE_AFLAG_DETECT_BS),
1363 &drive->atapi_flags);
1364 return 0; 1364 return 0;
1365 case MTSEEK: 1365 case MTSEEK:
1366 ide_tape_discard_merge_buffer(drive, 0); 1366 ide_tape_discard_merge_buffer(drive, 0);
@@ -1486,7 +1486,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1486 return -ENXIO; 1486 return -ENXIO;
1487 1487
1488 lock_kernel(); 1488 lock_kernel();
1489 tape = ide_tape_chrdev_get(i); 1489 tape = ide_tape_get(NULL, true, i);
1490 if (!tape) { 1490 if (!tape) {
1491 unlock_kernel(); 1491 unlock_kernel();
1492 return -ENXIO; 1492 return -ENXIO;
@@ -1505,20 +1505,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1505 1505
1506 filp->private_data = tape; 1506 filp->private_data = tape;
1507 1507
1508 if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) { 1508 if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
1509 retval = -EBUSY; 1509 retval = -EBUSY;
1510 goto out_put_tape; 1510 goto out_put_tape;
1511 } 1511 }
1512 1512
1513 retval = idetape_wait_ready(drive, 60 * HZ); 1513 retval = idetape_wait_ready(drive, 60 * HZ);
1514 if (retval) { 1514 if (retval) {
1515 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1515 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
1516 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); 1516 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
1517 goto out_put_tape; 1517 goto out_put_tape;
1518 } 1518 }
1519 1519
1520 idetape_read_position(drive); 1520 idetape_read_position(drive);
1521 if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags)) 1521 if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
1522 (void)idetape_rewind_tape(drive); 1522 (void)idetape_rewind_tape(drive);
1523 1523
1524 /* Read block size and write protect status from drive. */ 1524 /* Read block size and write protect status from drive. */
@@ -1534,7 +1534,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1534 if (tape->write_prot) { 1534 if (tape->write_prot) {
1535 if ((filp->f_flags & O_ACCMODE) == O_WRONLY || 1535 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
1536 (filp->f_flags & O_ACCMODE) == O_RDWR) { 1536 (filp->f_flags & O_ACCMODE) == O_RDWR) {
1537 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1537 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
1538 retval = -EROFS; 1538 retval = -EROFS;
1539 goto out_put_tape; 1539 goto out_put_tape;
1540 } 1540 }
@@ -1591,15 +1591,17 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
1591 ide_tape_discard_merge_buffer(drive, 1); 1591 ide_tape_discard_merge_buffer(drive, 1);
1592 } 1592 }
1593 1593
1594 if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags)) 1594 if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
1595 &drive->atapi_flags))
1595 (void) idetape_rewind_tape(drive); 1596 (void) idetape_rewind_tape(drive);
1597
1596 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 1598 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
1597 if (tape->door_locked == DOOR_LOCKED) { 1599 if (tape->door_locked == DOOR_LOCKED) {
1598 if (!ide_set_media_lock(drive, tape->disk, 0)) 1600 if (!ide_set_media_lock(drive, tape->disk, 0))
1599 tape->door_locked = DOOR_UNLOCKED; 1601 tape->door_locked = DOOR_UNLOCKED;
1600 } 1602 }
1601 } 1603 }
1602 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1604 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
1603 ide_tape_put(tape); 1605 ide_tape_put(tape);
1604 unlock_kernel(); 1606 unlock_kernel();
1605 return 0; 1607 return 0;
@@ -1905,7 +1907,7 @@ static const struct file_operations idetape_fops = {
1905 1907
1906static int idetape_open(struct block_device *bdev, fmode_t mode) 1908static int idetape_open(struct block_device *bdev, fmode_t mode)
1907{ 1909{
1908 struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk); 1910 struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
1909 1911
1910 if (!tape) 1912 if (!tape)
1911 return -ENXIO; 1913 return -ENXIO;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a0c3e1b2f73c..75b85a8cd2d4 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -98,7 +98,6 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
98 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 98 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
99 ide_tf_dump(drive->name, cmd); 99 ide_tf_dump(drive->name, cmd);
100 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 100 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
101 SELECT_MASK(drive, 0);
102 101
103 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { 102 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
104 u8 data[2] = { cmd->tf.data, cmd->hob.data }; 103 u8 data[2] = { cmd->tf.data, cmd->hob.data };
@@ -166,7 +165,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
166 if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { 165 if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
167 if (custom && tf->command == ATA_CMD_SET_MULTI) { 166 if (custom && tf->command == ATA_CMD_SET_MULTI) {
168 drive->mult_req = drive->mult_count = 0; 167 drive->mult_req = drive->mult_count = 0;
169 drive->special.b.recalibrate = 1; 168 drive->special_flags |= IDE_SFLAG_RECALIBRATE;
170 (void)ide_dump_status(drive, __func__, stat); 169 (void)ide_dump_status(drive, __func__, stat);
171 return ide_stopped; 170 return ide_stopped;
172 } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) { 171 } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 92c9b90931e7..16d056939f9f 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -211,6 +211,11 @@ static unsigned int ide_noflush;
211module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0); 211module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
212MODULE_PARM_DESC(noflush, "disable flush requests for a device"); 212MODULE_PARM_DESC(noflush, "disable flush requests for a device");
213 213
214static unsigned int ide_nohpa;
215
216module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0);
217MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
218
214static unsigned int ide_noprobe; 219static unsigned int ide_noprobe;
215 220
216module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0); 221module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
@@ -281,6 +286,11 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
281 drive->name); 286 drive->name);
282 drive->dev_flags |= IDE_DFLAG_NOFLUSH; 287 drive->dev_flags |= IDE_DFLAG_NOFLUSH;
283 } 288 }
289 if (ide_nohpa & (1 << i)) {
290 printk(KERN_INFO "ide: disabling Host Protected Area for %s\n",
291 drive->name);
292 drive->dev_flags |= IDE_DFLAG_NOHPA;
293 }
284 if (ide_noprobe & (1 << i)) { 294 if (ide_noprobe & (1 << i)) {
285 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); 295 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
286 drive->dev_flags |= IDE_DFLAG_NOPROBE; 296 drive->dev_flags |= IDE_DFLAG_NOPROBE;
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f359..ee9b55ecc62b 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -21,7 +21,7 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/io.h> 22#include <linux/io.h>
23 23
24static void __devinit plat_ide_setup_ports(hw_regs_t *hw, 24static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
25 void __iomem *base, 25 void __iomem *base,
26 void __iomem *ctrl, 26 void __iomem *ctrl,
27 struct pata_platform_info *pdata, 27 struct pata_platform_info *pdata,
@@ -40,12 +40,11 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
40 hw->io_ports.ctl_addr = (unsigned long)ctrl; 40 hw->io_ports.ctl_addr = (unsigned long)ctrl;
41 41
42 hw->irq = irq; 42 hw->irq = irq;
43
44 hw->chipset = ide_generic;
45} 43}
46 44
47static const struct ide_port_info platform_ide_port_info = { 45static const struct ide_port_info platform_ide_port_info = {
48 .host_flags = IDE_HFLAG_NO_DMA, 46 .host_flags = IDE_HFLAG_NO_DMA,
47 .chipset = ide_generic,
49}; 48};
50 49
51static int __devinit plat_ide_probe(struct platform_device *pdev) 50static int __devinit plat_ide_probe(struct platform_device *pdev)
@@ -55,7 +54,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
55 struct pata_platform_info *pdata; 54 struct pata_platform_info *pdata;
56 struct ide_host *host; 55 struct ide_host *host;
57 int ret = 0, mmio = 0; 56 int ret = 0, mmio = 0;
58 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 57 struct ide_hw hw, *hws[] = { &hw };
59 struct ide_port_info d = platform_ide_port_info; 58 struct ide_port_info d = platform_ide_port_info;
60 59
61 pdata = pdev->dev.platform_data; 60 pdata = pdev->dev.platform_data;
@@ -99,7 +98,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
99 if (mmio) 98 if (mmio)
100 d.host_flags |= IDE_HFLAG_MMIO; 99 d.host_flags |= IDE_HFLAG_MMIO;
101 100
102 ret = ide_host_add(&d, hws, &host); 101 ret = ide_host_add(&d, hws, 1, &host);
103 if (ret) 102 if (ret)
104 goto out; 103 goto out;
105 104
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
index 4b1718e83283..1447c8c90565 100644
--- a/drivers/ide/macide.c
+++ b/drivers/ide/macide.c
@@ -62,7 +62,7 @@ int macide_ack_intr(ide_hwif_t* hwif)
62 return 0; 62 return 0;
63} 63}
64 64
65static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, 65static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
66 int irq, ide_ack_intr_t *ack_intr) 66 int irq, ide_ack_intr_t *ack_intr)
67{ 67{
68 int i; 68 int i;
@@ -76,13 +76,12 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
76 76
77 hw->irq = irq; 77 hw->irq = irq;
78 hw->ack_intr = ack_intr; 78 hw->ack_intr = ack_intr;
79
80 hw->chipset = ide_generic;
81} 79}
82 80
83static const struct ide_port_info macide_port_info = { 81static const struct ide_port_info macide_port_info = {
84 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 82 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
85 .irq_flags = IRQF_SHARED, 83 .irq_flags = IRQF_SHARED,
84 .chipset = ide_generic,
86}; 85};
87 86
88static const char *mac_ide_name[] = 87static const char *mac_ide_name[] =
@@ -97,7 +96,7 @@ static int __init macide_init(void)
97 ide_ack_intr_t *ack_intr; 96 ide_ack_intr_t *ack_intr;
98 unsigned long base; 97 unsigned long base;
99 int irq; 98 int irq;
100 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 99 struct ide_hw hw, *hws[] = { &hw };
101 100
102 if (!MACH_IS_MAC) 101 if (!MACH_IS_MAC)
103 return -ENODEV; 102 return -ENODEV;
@@ -127,7 +126,7 @@ static int __init macide_init(void)
127 126
128 macide_setup_ports(&hw, base, irq, ack_intr); 127 macide_setup_ports(&hw, base, irq, ack_intr);
129 128
130 return ide_host_add(&macide_port_info, hws, NULL); 129 return ide_host_add(&macide_port_info, hws, 1, NULL);
131} 130}
132 131
133module_init(macide_init); 132module_init(macide_init);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 09d813d313f4..3c1dc0152153 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -306,6 +306,7 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = {
306 .host_flags = IDE_HFLAG_MMIO, 306 .host_flags = IDE_HFLAG_MMIO,
307 .pio_mask = ATA_PIO4, 307 .pio_mask = ATA_PIO4,
308 .mwdma_mask = ATA_MWDMA2, 308 .mwdma_mask = ATA_MWDMA2,
309 .chipset = ide_palm3710,
309}; 310};
310 311
311static int __init palm_bk3710_probe(struct platform_device *pdev) 312static int __init palm_bk3710_probe(struct platform_device *pdev)
@@ -315,7 +316,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
315 void __iomem *base; 316 void __iomem *base;
316 unsigned long rate, mem_size; 317 unsigned long rate, mem_size;
317 int i, rc; 318 int i, rc;
318 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 319 struct ide_hw hw, *hws[] = { &hw };
319 320
320 clk = clk_get(&pdev->dev, "IDECLK"); 321 clk = clk_get(&pdev->dev, "IDECLK");
321 if (IS_ERR(clk)) 322 if (IS_ERR(clk))
@@ -363,13 +364,12 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
363 (base + IDE_PALM_ATA_PRI_CTL_OFFSET); 364 (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
364 hw.irq = irq->start; 365 hw.irq = irq->start;
365 hw.dev = &pdev->dev; 366 hw.dev = &pdev->dev;
366 hw.chipset = ide_palm3710;
367 367
368 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 : 368 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
369 ATA_UDMA5; 369 ATA_UDMA5;
370 370
371 /* Register the IDE interface with Linux */ 371 /* Register the IDE interface with Linux */
372 rc = ide_host_add(&palm_bk3710_port_info, hws, NULL); 372 rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL);
373 if (rc) 373 if (rc)
374 goto out; 374 goto out;
375 375
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index b68906c3c17e..65ba8239e7b5 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -40,18 +40,6 @@
40#define DBG(fmt, args...) 40#define DBG(fmt, args...)
41#endif 41#endif
42 42
43static const char *pdc_quirk_drives[] = {
44 "QUANTUM FIREBALLlct08 08",
45 "QUANTUM FIREBALLP KA6.4",
46 "QUANTUM FIREBALLP KA9.1",
47 "QUANTUM FIREBALLP LM20.4",
48 "QUANTUM FIREBALLP KX13.6",
49 "QUANTUM FIREBALLP KX20.5",
50 "QUANTUM FIREBALLP KX27.3",
51 "QUANTUM FIREBALLP LM20.5",
52 NULL
53};
54
55static u8 max_dma_rate(struct pci_dev *pdev) 43static u8 max_dma_rate(struct pci_dev *pdev)
56{ 44{
57 u8 mode; 45 u8 mode;
@@ -200,19 +188,6 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
200 return ATA_CBL_PATA80; 188 return ATA_CBL_PATA80;
201} 189}
202 190
203static void pdcnew_quirkproc(ide_drive_t *drive)
204{
205 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
206
207 for (list = pdc_quirk_drives; *list != NULL; list++)
208 if (strstr(m, *list) != NULL) {
209 drive->quirk_list = 2;
210 return;
211 }
212
213 drive->quirk_list = 0;
214}
215
216static void pdcnew_reset(ide_drive_t *drive) 191static void pdcnew_reset(ide_drive_t *drive)
217{ 192{
218 /* 193 /*
@@ -473,7 +448,6 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
473static const struct ide_port_ops pdcnew_port_ops = { 448static const struct ide_port_ops pdcnew_port_ops = {
474 .set_pio_mode = pdcnew_set_pio_mode, 449 .set_pio_mode = pdcnew_set_pio_mode,
475 .set_dma_mode = pdcnew_set_dma_mode, 450 .set_dma_mode = pdcnew_set_dma_mode,
476 .quirkproc = pdcnew_quirkproc,
477 .resetproc = pdcnew_reset, 451 .resetproc = pdcnew_reset,
478 .cable_detect = pdcnew_cable_detect, 452 .cable_detect = pdcnew_cable_detect,
479}; 453};
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index e24ecc87a9b1..b6abf7e52cac 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -23,18 +23,6 @@
23 23
24#define PDC202XX_DEBUG_DRIVE_INFO 0 24#define PDC202XX_DEBUG_DRIVE_INFO 0
25 25
26static const char *pdc_quirk_drives[] = {
27 "QUANTUM FIREBALLlct08 08",
28 "QUANTUM FIREBALLP KA6.4",
29 "QUANTUM FIREBALLP KA9.1",
30 "QUANTUM FIREBALLP LM20.4",
31 "QUANTUM FIREBALLP KX13.6",
32 "QUANTUM FIREBALLP KX20.5",
33 "QUANTUM FIREBALLP KX27.3",
34 "QUANTUM FIREBALLP LM20.5",
35 NULL
36};
37
38static void pdc_old_disable_66MHz_clock(ide_hwif_t *); 26static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
39 27
40static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) 28static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
@@ -151,19 +139,6 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
151 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); 139 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
152} 140}
153 141
154static void pdc202xx_quirkproc(ide_drive_t *drive)
155{
156 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
157
158 for (list = pdc_quirk_drives; *list != NULL; list++)
159 if (strstr(m, *list) != NULL) {
160 drive->quirk_list = 2;
161 return;
162 }
163
164 drive->quirk_list = 0;
165}
166
167static void pdc202xx_dma_start(ide_drive_t *drive) 142static void pdc202xx_dma_start(ide_drive_t *drive)
168{ 143{
169 if (drive->current_speed > XFER_UDMA_2) 144 if (drive->current_speed > XFER_UDMA_2)
@@ -203,52 +178,6 @@ static int pdc202xx_dma_end(ide_drive_t *drive)
203 return ide_dma_end(drive); 178 return ide_dma_end(drive);
204} 179}
205 180
206static int pdc202xx_dma_test_irq(ide_drive_t *drive)
207{
208 ide_hwif_t *hwif = drive->hwif;
209 unsigned long high_16 = hwif->extra_base - 16;
210 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
211 u8 sc1d = inb(high_16 + 0x001d);
212
213 if (hwif->channel) {
214 /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */
215 if ((sc1d & 0x50) == 0x50)
216 goto somebody_else;
217 else if ((sc1d & 0x40) == 0x40)
218 return (dma_stat & 4) == 4;
219 } else {
220 /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */
221 if ((sc1d & 0x05) == 0x05)
222 goto somebody_else;
223 else if ((sc1d & 0x04) == 0x04)
224 return (dma_stat & 4) == 4;
225 }
226somebody_else:
227 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
228}
229
230static void pdc202xx_reset(ide_drive_t *drive)
231{
232 ide_hwif_t *hwif = drive->hwif;
233 unsigned long high_16 = hwif->extra_base - 16;
234 u8 udma_speed_flag = inb(high_16 | 0x001f);
235
236 printk(KERN_WARNING "PDC202xx: software reset...\n");
237
238 outb(udma_speed_flag | 0x10, high_16 | 0x001f);
239 mdelay(100);
240 outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
241 mdelay(2000); /* 2 seconds ?! */
242
243 ide_set_max_pio(drive);
244}
245
246static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
247{
248 pdc202xx_reset(drive);
249 ide_dma_lost_irq(drive);
250}
251
252static int init_chipset_pdc202xx(struct pci_dev *dev) 181static int init_chipset_pdc202xx(struct pci_dev *dev)
253{ 182{
254 unsigned long dmabase = pci_resource_start(dev, 4); 183 unsigned long dmabase = pci_resource_start(dev, 4);
@@ -302,37 +231,22 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
302static const struct ide_port_ops pdc20246_port_ops = { 231static const struct ide_port_ops pdc20246_port_ops = {
303 .set_pio_mode = pdc202xx_set_pio_mode, 232 .set_pio_mode = pdc202xx_set_pio_mode,
304 .set_dma_mode = pdc202xx_set_mode, 233 .set_dma_mode = pdc202xx_set_mode,
305 .quirkproc = pdc202xx_quirkproc,
306}; 234};
307 235
308static const struct ide_port_ops pdc2026x_port_ops = { 236static const struct ide_port_ops pdc2026x_port_ops = {
309 .set_pio_mode = pdc202xx_set_pio_mode, 237 .set_pio_mode = pdc202xx_set_pio_mode,
310 .set_dma_mode = pdc202xx_set_mode, 238 .set_dma_mode = pdc202xx_set_mode,
311 .quirkproc = pdc202xx_quirkproc,
312 .resetproc = pdc202xx_reset,
313 .cable_detect = pdc2026x_cable_detect, 239 .cable_detect = pdc2026x_cable_detect,
314}; 240};
315 241
316static const struct ide_dma_ops pdc20246_dma_ops = {
317 .dma_host_set = ide_dma_host_set,
318 .dma_setup = ide_dma_setup,
319 .dma_start = ide_dma_start,
320 .dma_end = ide_dma_end,
321 .dma_test_irq = pdc202xx_dma_test_irq,
322 .dma_lost_irq = ide_dma_lost_irq,
323 .dma_timer_expiry = ide_dma_sff_timer_expiry,
324 .dma_sff_read_status = ide_dma_sff_read_status,
325};
326
327static const struct ide_dma_ops pdc2026x_dma_ops = { 242static const struct ide_dma_ops pdc2026x_dma_ops = {
328 .dma_host_set = ide_dma_host_set, 243 .dma_host_set = ide_dma_host_set,
329 .dma_setup = ide_dma_setup, 244 .dma_setup = ide_dma_setup,
330 .dma_start = pdc202xx_dma_start, 245 .dma_start = pdc202xx_dma_start,
331 .dma_end = pdc202xx_dma_end, 246 .dma_end = pdc202xx_dma_end,
332 .dma_test_irq = pdc202xx_dma_test_irq, 247 .dma_test_irq = ide_dma_test_irq,
333 .dma_lost_irq = pdc202xx_dma_lost_irq, 248 .dma_lost_irq = ide_dma_lost_irq,
334 .dma_timer_expiry = ide_dma_sff_timer_expiry, 249 .dma_timer_expiry = ide_dma_sff_timer_expiry,
335 .dma_clear = pdc202xx_reset,
336 .dma_sff_read_status = ide_dma_sff_read_status, 250 .dma_sff_read_status = ide_dma_sff_read_status,
337}; 251};
338 252
@@ -354,7 +268,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
354 .name = DRV_NAME, 268 .name = DRV_NAME,
355 .init_chipset = init_chipset_pdc202xx, 269 .init_chipset = init_chipset_pdc202xx,
356 .port_ops = &pdc20246_port_ops, 270 .port_ops = &pdc20246_port_ops,
357 .dma_ops = &pdc20246_dma_ops, 271 .dma_ops = &sff_dma_ops,
358 .host_flags = IDE_HFLAGS_PDC202XX, 272 .host_flags = IDE_HFLAGS_PDC202XX,
359 .pio_mask = ATA_PIO4, 273 .pio_mask = ATA_PIO4,
360 .mwdma_mask = ATA_MWDMA2, 274 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index f76e4e6b408f..97642a7a79c4 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1023,13 +1023,14 @@ static const struct ide_port_info pmac_port_info = {
1023 * Setup, register & probe an IDE channel driven by this driver, this is 1023 * Setup, register & probe an IDE channel driven by this driver, this is
1024 * called by one of the 2 probe functions (macio or PCI). 1024 * called by one of the 2 probe functions (macio or PCI).
1025 */ 1025 */
1026static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) 1026static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1027 struct ide_hw *hw)
1027{ 1028{
1028 struct device_node *np = pmif->node; 1029 struct device_node *np = pmif->node;
1029 const int *bidp; 1030 const int *bidp;
1030 struct ide_host *host; 1031 struct ide_host *host;
1031 ide_hwif_t *hwif; 1032 ide_hwif_t *hwif;
1032 hw_regs_t *hws[] = { hw, NULL, NULL, NULL }; 1033 struct ide_hw *hws[] = { hw };
1033 struct ide_port_info d = pmac_port_info; 1034 struct ide_port_info d = pmac_port_info;
1034 int rc; 1035 int rc;
1035 1036
@@ -1077,7 +1078,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1077 /* Make sure we have sane timings */ 1078 /* Make sure we have sane timings */
1078 sanitize_timings(pmif); 1079 sanitize_timings(pmif);
1079 1080
1080 host = ide_host_alloc(&d, hws); 1081 host = ide_host_alloc(&d, hws, 1);
1081 if (host == NULL) 1082 if (host == NULL)
1082 return -ENOMEM; 1083 return -ENOMEM;
1083 hwif = host->ports[0]; 1084 hwif = host->ports[0];
@@ -1124,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1124 return 0; 1125 return 0;
1125} 1126}
1126 1127
1127static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base) 1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
1128{ 1129{
1129 int i; 1130 int i;
1130 1131
@@ -1144,7 +1145,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1144 unsigned long regbase; 1145 unsigned long regbase;
1145 pmac_ide_hwif_t *pmif; 1146 pmac_ide_hwif_t *pmif;
1146 int irq, rc; 1147 int irq, rc;
1147 hw_regs_t hw; 1148 struct ide_hw hw;
1148 1149
1149 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); 1150 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1150 if (pmif == NULL) 1151 if (pmif == NULL)
@@ -1268,7 +1269,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1268 void __iomem *base; 1269 void __iomem *base;
1269 unsigned long rbase, rlen; 1270 unsigned long rbase, rlen;
1270 int rc; 1271 int rc;
1271 hw_regs_t hw; 1272 struct ide_hw hw;
1272 1273
1273 np = pci_device_to_OF_node(pdev); 1274 np = pci_device_to_OF_node(pdev);
1274 if (np == NULL) { 1275 if (np == NULL) {
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
index c79346679244..ab49a97023d9 100644
--- a/drivers/ide/q40ide.c
+++ b/drivers/ide/q40ide.c
@@ -51,11 +51,11 @@ static int q40ide_default_irq(unsigned long base)
51/* 51/*
52 * Addresses are pretranslated for Q40 ISA access. 52 * Addresses are pretranslated for Q40 ISA access.
53 */ 53 */
54static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, 54static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base,
55 ide_ack_intr_t *ack_intr, 55 ide_ack_intr_t *ack_intr,
56 int irq) 56 int irq)
57{ 57{
58 memset(hw, 0, sizeof(hw_regs_t)); 58 memset(hw, 0, sizeof(*hw));
59 /* BIG FAT WARNING: 59 /* BIG FAT WARNING:
60 assumption: only DATA port is ever used in 16 bit mode */ 60 assumption: only DATA port is ever used in 16 bit mode */
61 hw->io_ports.data_addr = Q40_ISA_IO_W(base); 61 hw->io_ports.data_addr = Q40_ISA_IO_W(base);
@@ -70,8 +70,6 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
70 70
71 hw->irq = irq; 71 hw->irq = irq;
72 hw->ack_intr = ack_intr; 72 hw->ack_intr = ack_intr;
73
74 hw->chipset = ide_generic;
75} 73}
76 74
77static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, 75static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
@@ -119,6 +117,7 @@ static const struct ide_port_info q40ide_port_info = {
119 .tp_ops = &q40ide_tp_ops, 117 .tp_ops = &q40ide_tp_ops,
120 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 118 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
121 .irq_flags = IRQF_SHARED, 119 .irq_flags = IRQF_SHARED,
120 .chipset = ide_generic,
122}; 121};
123 122
124/* 123/*
@@ -136,7 +135,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
136static int __init q40ide_init(void) 135static int __init q40ide_init(void)
137{ 136{
138 int i; 137 int i;
139 hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; 138 struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
140 139
141 if (!MACH_IS_Q40) 140 if (!MACH_IS_Q40)
142 return -ENODEV; 141 return -ENODEV;
@@ -163,7 +162,7 @@ static int __init q40ide_init(void)
163 hws[i] = &hw[i]; 162 hws[i] = &hw[i];
164 } 163 }
165 164
166 return ide_host_add(&q40ide_port_info, hws, NULL); 165 return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
167} 166}
168 167
169module_init(q40ide_init); 168module_init(q40ide_init);
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
index d5003ca69801..00f54248f41f 100644
--- a/drivers/ide/rapide.c
+++ b/drivers/ide/rapide.c
@@ -13,9 +13,10 @@
13 13
14static const struct ide_port_info rapide_port_info = { 14static const struct ide_port_info rapide_port_info = {
15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
16 .chipset = ide_generic,
16}; 17};
17 18
18static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, 19static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
19 void __iomem *ctrl, unsigned int sz, int irq) 20 void __iomem *ctrl, unsigned int sz, int irq)
20{ 21{
21 unsigned long port = (unsigned long)base; 22 unsigned long port = (unsigned long)base;
@@ -35,7 +36,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
35 void __iomem *base; 36 void __iomem *base;
36 struct ide_host *host; 37 struct ide_host *host;
37 int ret; 38 int ret;
38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 39 struct ide_hw hw, *hws[] = { &hw };
39 40
40 ret = ecard_request_resources(ec); 41 ret = ecard_request_resources(ec);
41 if (ret) 42 if (ret)
@@ -49,10 +50,9 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
49 50
50 memset(&hw, 0, sizeof(hw)); 51 memset(&hw, 0, sizeof(hw));
51 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); 52 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
52 hw.chipset = ide_generic;
53 hw.dev = &ec->dev; 53 hw.dev = &ec->dev;
54 54
55 ret = ide_host_add(&rapide_port_info, hws, &host); 55 ret = ide_host_add(&rapide_port_info, hws, 1, &host);
56 if (ret) 56 if (ret)
57 goto release; 57 goto release;
58 58
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 5be41f25204f..1104bb301eb9 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
559{ 559{
560 struct scc_ports *ports = pci_get_drvdata(dev); 560 struct scc_ports *ports = pci_get_drvdata(dev);
561 struct ide_host *host; 561 struct ide_host *host;
562 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 562 struct ide_hw hw, *hws[] = { &hw };
563 int i, rc; 563 int i, rc;
564 564
565 memset(&hw, 0, sizeof(hw)); 565 memset(&hw, 0, sizeof(hw));
@@ -567,9 +567,8 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
567 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; 567 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
568 hw.irq = dev->irq; 568 hw.irq = dev->irq;
569 hw.dev = &dev->dev; 569 hw.dev = &dev->dev;
570 hw.chipset = ide_pci;
571 570
572 rc = ide_host_add(d, hws, &host); 571 rc = ide_host_add(d, hws, 1, &host);
573 if (rc) 572 if (rc)
574 return rc; 573 return rc;
575 574
@@ -823,6 +822,7 @@ static const struct ide_port_info scc_chipset __devinitdata = {
823 .host_flags = IDE_HFLAG_SINGLE, 822 .host_flags = IDE_HFLAG_SINGLE,
824 .irq_flags = IRQF_SHARED, 823 .irq_flags = IRQF_SHARED,
825 .pio_mask = ATA_PIO4, 824 .pio_mask = ATA_PIO4,
825 .chipset = ide_pci,
826}; 826};
827 827
828/** 828/**
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 7a3a12d6e638..ab3db61d2ba0 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 1995-1998 Mark Lord 3 * Copyright (C) 1995-1998 Mark Lord
4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * May be copied or modified under the terms of the GNU General Public License 6 * May be copied or modified under the terms of the GNU General Public License
7 */ 7 */
@@ -301,11 +301,11 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
301} 301}
302 302
303/** 303/**
304 * ide_hw_configure - configure a hw_regs_t instance 304 * ide_hw_configure - configure a struct ide_hw instance
305 * @dev: PCI device holding interface 305 * @dev: PCI device holding interface
306 * @d: IDE port info 306 * @d: IDE port info
307 * @port: port number 307 * @port: port number
308 * @hw: hw_regs_t instance corresponding to this port 308 * @hw: struct ide_hw instance corresponding to this port
309 * 309 *
310 * Perform the initial set up for the hardware interface structure. This 310 * Perform the initial set up for the hardware interface structure. This
311 * is done per interface port rather than per PCI device. There may be 311 * is done per interface port rather than per PCI device. There may be
@@ -315,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
315 */ 315 */
316 316
317static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, 317static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
318 unsigned int port, hw_regs_t *hw) 318 unsigned int port, struct ide_hw *hw)
319{ 319{
320 unsigned long ctl = 0, base = 0; 320 unsigned long ctl = 0, base = 0;
321 321
@@ -344,7 +344,6 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
344 344
345 memset(hw, 0, sizeof(*hw)); 345 memset(hw, 0, sizeof(*hw));
346 hw->dev = &dev->dev; 346 hw->dev = &dev->dev;
347 hw->chipset = d->chipset ? d->chipset : ide_pci;
348 ide_std_init_ports(hw, base, ctl | 2); 347 ide_std_init_ports(hw, base, ctl | 2);
349 348
350 return 0; 349 return 0;
@@ -446,8 +445,8 @@ out:
446 * ide_pci_setup_ports - configure ports/devices on PCI IDE 445 * ide_pci_setup_ports - configure ports/devices on PCI IDE
447 * @dev: PCI device 446 * @dev: PCI device
448 * @d: IDE port info 447 * @d: IDE port info
449 * @hw: hw_regs_t instances corresponding to this PCI IDE device 448 * @hw: struct ide_hw instances corresponding to this PCI IDE device
450 * @hws: hw_regs_t pointers table to update 449 * @hws: struct ide_hw pointers table to update
451 * 450 *
452 * Scan the interfaces attached to this device and do any 451 * Scan the interfaces attached to this device and do any
453 * necessary per port setup. Attach the devices and ask the 452 * necessary per port setup. Attach the devices and ask the
@@ -459,7 +458,7 @@ out:
459 */ 458 */
460 459
461void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, 460void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
462 hw_regs_t *hw, hw_regs_t **hws) 461 struct ide_hw *hw, struct ide_hw **hws)
463{ 462{
464 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; 463 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
465 u8 tmp; 464 u8 tmp;
@@ -535,61 +534,15 @@ out:
535 return ret; 534 return ret;
536} 535}
537 536
538int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
539 void *priv)
540{
541 struct ide_host *host;
542 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
543 int ret;
544
545 ret = ide_setup_pci_controller(dev, d, 1);
546 if (ret < 0)
547 goto out;
548
549 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
550
551 host = ide_host_alloc(d, hws);
552 if (host == NULL) {
553 ret = -ENOMEM;
554 goto out;
555 }
556
557 host->dev[0] = &dev->dev;
558
559 host->host_priv = priv;
560
561 host->irq_flags = IRQF_SHARED;
562
563 pci_set_drvdata(dev, host);
564
565 ret = do_ide_setup_pci_device(dev, d, 1);
566 if (ret < 0)
567 goto out;
568
569 /* fixup IRQ */
570 if (ide_pci_is_in_compatibility_mode(dev)) {
571 hw[0].irq = pci_get_legacy_ide_irq(dev, 0);
572 hw[1].irq = pci_get_legacy_ide_irq(dev, 1);
573 } else
574 hw[1].irq = hw[0].irq = ret;
575
576 ret = ide_host_register(host, d, hws);
577 if (ret)
578 ide_host_free(host);
579out:
580 return ret;
581}
582EXPORT_SYMBOL_GPL(ide_pci_init_one);
583
584int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, 537int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
585 const struct ide_port_info *d, void *priv) 538 const struct ide_port_info *d, void *priv)
586{ 539{
587 struct pci_dev *pdev[] = { dev1, dev2 }; 540 struct pci_dev *pdev[] = { dev1, dev2 };
588 struct ide_host *host; 541 struct ide_host *host;
589 int ret, i; 542 int ret, i, n_ports = dev2 ? 4 : 2;
590 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; 543 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
591 544
592 for (i = 0; i < 2; i++) { 545 for (i = 0; i < n_ports / 2; i++) {
593 ret = ide_setup_pci_controller(pdev[i], d, !i); 546 ret = ide_setup_pci_controller(pdev[i], d, !i);
594 if (ret < 0) 547 if (ret < 0)
595 goto out; 548 goto out;
@@ -597,23 +550,24 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
597 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); 550 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
598 } 551 }
599 552
600 host = ide_host_alloc(d, hws); 553 host = ide_host_alloc(d, hws, n_ports);
601 if (host == NULL) { 554 if (host == NULL) {
602 ret = -ENOMEM; 555 ret = -ENOMEM;
603 goto out; 556 goto out;
604 } 557 }
605 558
606 host->dev[0] = &dev1->dev; 559 host->dev[0] = &dev1->dev;
607 host->dev[1] = &dev2->dev; 560 if (dev2)
561 host->dev[1] = &dev2->dev;
608 562
609 host->host_priv = priv; 563 host->host_priv = priv;
610
611 host->irq_flags = IRQF_SHARED; 564 host->irq_flags = IRQF_SHARED;
612 565
613 pci_set_drvdata(pdev[0], host); 566 pci_set_drvdata(pdev[0], host);
614 pci_set_drvdata(pdev[1], host); 567 if (dev2)
568 pci_set_drvdata(pdev[1], host);
615 569
616 for (i = 0; i < 2; i++) { 570 for (i = 0; i < n_ports / 2; i++) {
617 ret = do_ide_setup_pci_device(pdev[i], d, !i); 571 ret = do_ide_setup_pci_device(pdev[i], d, !i);
618 572
619 /* 573 /*
@@ -639,6 +593,13 @@ out:
639} 593}
640EXPORT_SYMBOL_GPL(ide_pci_init_two); 594EXPORT_SYMBOL_GPL(ide_pci_init_two);
641 595
596int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
597 void *priv)
598{
599 return ide_pci_init_two(dev, NULL, d, priv);
600}
601EXPORT_SYMBOL_GPL(ide_pci_init_one);
602
642void ide_pci_remove(struct pci_dev *dev) 603void ide_pci_remove(struct pci_dev *dev)
643{ 604{
644 struct ide_host *host = pci_get_drvdata(dev); 605 struct ide_host *host = pci_get_drvdata(dev);
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index e5d2a48a84de..5f37f168f944 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -91,7 +91,7 @@ typedef struct {
91 91
92 92
93static void 93static void
94sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, 94sgiioc4_init_hwif_ports(struct ide_hw *hw, unsigned long data_port,
95 unsigned long ctrl_port, unsigned long irq_port) 95 unsigned long ctrl_port, unsigned long irq_port)
96{ 96{
97 unsigned long reg = data_port; 97 unsigned long reg = data_port;
@@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
546 unsigned long cmd_base, irqport; 546 unsigned long cmd_base, irqport;
547 unsigned long bar0, cmd_phys_base, ctl; 547 unsigned long bar0, cmd_phys_base, ctl;
548 void __iomem *virt_base; 548 void __iomem *virt_base;
549 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 549 struct ide_hw hw, *hws[] = { &hw };
550 int rc; 550 int rc;
551 551
552 /* Get the CmdBlk and CtrlBlk Base Registers */ 552 /* Get the CmdBlk and CtrlBlk Base Registers */
@@ -575,13 +575,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
575 memset(&hw, 0, sizeof(hw)); 575 memset(&hw, 0, sizeof(hw));
576 sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); 576 sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
577 hw.irq = dev->irq; 577 hw.irq = dev->irq;
578 hw.chipset = ide_pci;
579 hw.dev = &dev->dev; 578 hw.dev = &dev->dev;
580 579
581 /* Initializing chipset IRQ Registers */ 580 /* Initializing chipset IRQ Registers */
582 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 581 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
583 582
584 rc = ide_host_add(&sgiioc4_port_info, hws, NULL); 583 rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL);
585 if (!rc) 584 if (!rc)
586 return 0; 585 return 0;
587 586
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index e4973cd1fba9..bd82d228608c 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -451,8 +451,8 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
451static void sil_sata_pre_reset(ide_drive_t *drive) 451static void sil_sata_pre_reset(ide_drive_t *drive)
452{ 452{
453 if (drive->media == ide_disk) { 453 if (drive->media == ide_disk) {
454 drive->special.b.set_geometry = 0; 454 drive->special_flags &=
455 drive->special.b.recalibrate = 0; 455 ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
456 } 456 }
457} 457}
458 458
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index b0a460625335..0924abff52ff 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -10,7 +10,7 @@
10 * with the timing registers setup. 10 * with the timing registers setup.
11 * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org 11 * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org
12 * 12 *
13 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> 13 * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. <source@mvista.com>
14 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 14 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
15 */ 15 */
16 16
@@ -146,14 +146,15 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
146 u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; 146 u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
147 u8 dma_cmd; 147 u8 dma_cmd;
148 148
149 printk("sl82c105: lost IRQ, resetting host\n"); 149 printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n");
150 150
151 /* 151 /*
152 * Check the raw interrupt from the drive. 152 * Check the raw interrupt from the drive.
153 */ 153 */
154 pci_read_config_dword(dev, 0x40, &val); 154 pci_read_config_dword(dev, 0x40, &val);
155 if (val & mask) 155 if (val & mask)
156 printk("sl82c105: drive was requesting IRQ, but host lost it\n"); 156 printk(KERN_INFO "sl82c105: drive was requesting IRQ, "
157 "but host lost it\n");
157 158
158 /* 159 /*
159 * Was DMA enabled? If so, disable it - we're resetting the 160 * Was DMA enabled? If so, disable it - we're resetting the
@@ -162,7 +163,7 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
162 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); 163 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
163 if (dma_cmd & 1) { 164 if (dma_cmd & 1) {
164 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); 165 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
165 printk("sl82c105: DMA was enabled\n"); 166 printk(KERN_INFO "sl82c105: DMA was enabled\n");
166 } 167 }
167 168
168 sl82c105_reset_host(dev); 169 sl82c105_reset_host(dev);
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index e33d764e2945..ea89fddeed91 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -130,8 +130,7 @@ static const struct ide_port_info tx4938ide_port_info __initdata = {
130 130
131static int __init tx4938ide_probe(struct platform_device *pdev) 131static int __init tx4938ide_probe(struct platform_device *pdev)
132{ 132{
133 hw_regs_t hw; 133 struct ide_hw hw, *hws[] = { &hw };
134 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
135 struct ide_host *host; 134 struct ide_host *host;
136 struct resource *res; 135 struct resource *res;
137 struct tx4938ide_platform_info *pdata = pdev->dev.platform_data; 136 struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
@@ -183,7 +182,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev)
183 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0); 182 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
184 else 183 else
185 d.port_ops = NULL; 184 d.port_ops = NULL;
186 ret = ide_host_add(&d, hws, &host); 185 ret = ide_host_add(&d, hws, 1, &host);
187 if (!ret) 186 if (!ret)
188 platform_set_drvdata(pdev, host); 187 platform_set_drvdata(pdev, host);
189 return ret; 188 return ret;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 5ca76224f6d1..64b58ecc3f0e 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -537,8 +537,7 @@ static const struct ide_port_info tx4939ide_port_info __initdata = {
537 537
538static int __init tx4939ide_probe(struct platform_device *pdev) 538static int __init tx4939ide_probe(struct platform_device *pdev)
539{ 539{
540 hw_regs_t hw; 540 struct ide_hw hw, *hws[] = { &hw };
541 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
542 struct ide_host *host; 541 struct ide_host *host;
543 struct resource *res; 542 struct resource *res;
544 int irq, ret; 543 int irq, ret;
@@ -581,7 +580,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
581 hw.dev = &pdev->dev; 580 hw.dev = &pdev->dev;
582 581
583 pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq); 582 pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
584 host = ide_host_alloc(&tx4939ide_port_info, hws); 583 host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
585 if (!host) 584 if (!host)
586 return -ENOMEM; 585 return -ENOMEM;
587 /* use extra_base for base address of the all registers */ 586 /* use extra_base for base address of the all registers */
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 75223f50de58..0ba6ec876296 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
257{ 257{
258 struct iscsi_iser_task *iser_task = task->dd_data; 258 struct iscsi_iser_task *iser_task = task->dd_data;
259 259
260 /* 260 /* mgmt tasks do not need special cleanup */
261 * mgmt tasks do not need special cleanup and we do not 261 if (!task->sc)
262 * allocate anything in the init task callout
263 */
264 if (!task->sc || task->state == ISCSI_TASK_PENDING)
265 return; 262 return;
266 263
267 if (iser_task->status == ISER_TASK_STATUS_STARTED) { 264 if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
517} 514}
518 515
519static struct iscsi_endpoint * 516static struct iscsi_endpoint *
520iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking) 517iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
518 int non_blocking)
521{ 519{
522 int err; 520 int err;
523 struct iser_conn *ib_conn; 521 struct iser_conn *ib_conn;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index da3c3a5d2689..c4b3fbd1a80f 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -192,7 +192,7 @@ config SERIO_RAW
192 192
193config SERIO_XILINX_XPS_PS2 193config SERIO_XILINX_XPS_PS2
194 tristate "Xilinx XPS PS/2 Controller Support" 194 tristate "Xilinx XPS PS/2 Controller Support"
195 depends on PPC 195 depends on PPC || MICROBLAZE
196 help 196 help
197 This driver supports XPS PS/2 IP from the Xilinx EDK on 197 This driver supports XPS PS/2 IP from the Xilinx EDK on
198 PowerPC platform. 198 PowerPC platform.
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index a3d3cbab359a..0aaa0597a622 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST 1config LGUEST
2 tristate "Linux hypervisor example code" 2 tristate "Linux hypervisor example code"
3 depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX 3 depends on X86_32 && EXPERIMENTAL && EVENTFD
4 select HVC_DRIVER 4 select HVC_DRIVER
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 4845fb3cf74b..a6974e9b8ebf 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -95,7 +95,7 @@ static __init int map_switcher(void)
95 * array of struct pages. It increments that pointer, but we don't 95 * array of struct pages. It increments that pointer, but we don't
96 * care. */ 96 * care. */
97 pagep = switcher_page; 97 pagep = switcher_page;
98 err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep); 98 err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
99 if (err) { 99 if (err) {
100 printk("lguest: map_vm_area failed: %i\n", err); 100 printk("lguest: map_vm_area failed: %i\n", err);
101 goto free_vma; 101 goto free_vma;
@@ -188,6 +188,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
188{ 188{
189 /* We stop running once the Guest is dead. */ 189 /* We stop running once the Guest is dead. */
190 while (!cpu->lg->dead) { 190 while (!cpu->lg->dead) {
191 unsigned int irq;
192 bool more;
193
191 /* First we run any hypercalls the Guest wants done. */ 194 /* First we run any hypercalls the Guest wants done. */
192 if (cpu->hcall) 195 if (cpu->hcall)
193 do_hypercalls(cpu); 196 do_hypercalls(cpu);
@@ -195,23 +198,23 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
195 /* It's possible the Guest did a NOTIFY hypercall to the 198 /* It's possible the Guest did a NOTIFY hypercall to the
196 * Launcher, in which case we return from the read() now. */ 199 * Launcher, in which case we return from the read() now. */
197 if (cpu->pending_notify) { 200 if (cpu->pending_notify) {
198 if (put_user(cpu->pending_notify, user)) 201 if (!send_notify_to_eventfd(cpu)) {
199 return -EFAULT; 202 if (put_user(cpu->pending_notify, user))
200 return sizeof(cpu->pending_notify); 203 return -EFAULT;
204 return sizeof(cpu->pending_notify);
205 }
201 } 206 }
202 207
203 /* Check for signals */ 208 /* Check for signals */
204 if (signal_pending(current)) 209 if (signal_pending(current))
205 return -ERESTARTSYS; 210 return -ERESTARTSYS;
206 211
207 /* If Waker set break_out, return to Launcher. */
208 if (cpu->break_out)
209 return -EAGAIN;
210
211 /* Check if there are any interrupts which can be delivered now: 212 /* Check if there are any interrupts which can be delivered now:
212 * if so, this sets up the hander to be executed when we next 213 * if so, this sets up the hander to be executed when we next
213 * run the Guest. */ 214 * run the Guest. */
214 maybe_do_interrupt(cpu); 215 irq = interrupt_pending(cpu, &more);
216 if (irq < LGUEST_IRQS)
217 try_deliver_interrupt(cpu, irq, more);
215 218
216 /* All long-lived kernel loops need to check with this horrible 219 /* All long-lived kernel loops need to check with this horrible
217 * thing called the freezer. If the Host is trying to suspend, 220 * thing called the freezer. If the Host is trying to suspend,
@@ -224,10 +227,15 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
224 break; 227 break;
225 228
226 /* If the Guest asked to be stopped, we sleep. The Guest's 229 /* If the Guest asked to be stopped, we sleep. The Guest's
227 * clock timer or LHREQ_BREAK from the Waker will wake us. */ 230 * clock timer will wake us. */
228 if (cpu->halted) { 231 if (cpu->halted) {
229 set_current_state(TASK_INTERRUPTIBLE); 232 set_current_state(TASK_INTERRUPTIBLE);
230 schedule(); 233 /* Just before we sleep, make sure no interrupt snuck in
234 * which we should be doing. */
235 if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
236 set_current_state(TASK_RUNNING);
237 else
238 schedule();
231 continue; 239 continue;
232 } 240 }
233 241
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 54d66f05fefa..c29ffa19cb74 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -37,6 +37,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
37 /* This call does nothing, except by breaking out of the Guest 37 /* This call does nothing, except by breaking out of the Guest
38 * it makes us process all the asynchronous hypercalls. */ 38 * it makes us process all the asynchronous hypercalls. */
39 break; 39 break;
40 case LHCALL_SEND_INTERRUPTS:
41 /* This call does nothing too, but by breaking out of the Guest
42 * it makes us process any pending interrupts. */
43 break;
40 case LHCALL_LGUEST_INIT: 44 case LHCALL_LGUEST_INIT:
41 /* You can't get here unless you're already initialized. Don't 45 /* You can't get here unless you're already initialized. Don't
42 * do that. */ 46 * do that. */
@@ -73,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
73 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); 77 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
74 break; 78 break;
75 case LHCALL_SET_PTE: 79 case LHCALL_SET_PTE:
80#ifdef CONFIG_X86_PAE
81 guest_set_pte(cpu, args->arg1, args->arg2,
82 __pte(args->arg3 | (u64)args->arg4 << 32));
83#else
76 guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); 84 guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
85#endif
86 break;
87 case LHCALL_SET_PGD:
88 guest_set_pgd(cpu->lg, args->arg1, args->arg2);
77 break; 89 break;
90#ifdef CONFIG_X86_PAE
78 case LHCALL_SET_PMD: 91 case LHCALL_SET_PMD:
79 guest_set_pmd(cpu->lg, args->arg1, args->arg2); 92 guest_set_pmd(cpu->lg, args->arg1, args->arg2);
80 break; 93 break;
94#endif
81 case LHCALL_SET_CLOCKEVENT: 95 case LHCALL_SET_CLOCKEVENT:
82 guest_set_clockevent(cpu, args->arg1); 96 guest_set_clockevent(cpu, args->arg1);
83 break; 97 break;
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 6e99adbe1946..0e9067b0d507 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -128,30 +128,39 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
128/*H:205 128/*H:205
129 * Virtual Interrupts. 129 * Virtual Interrupts.
130 * 130 *
131 * maybe_do_interrupt() gets called before every entry to the Guest, to see if 131 * interrupt_pending() returns the first pending interrupt which isn't blocked
132 * we should divert the Guest to running an interrupt handler. */ 132 * by the Guest. It is called before every entry to the Guest, and just before
133void maybe_do_interrupt(struct lg_cpu *cpu) 133 * we go to sleep when the Guest has halted itself. */
134unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
134{ 135{
135 unsigned int irq; 136 unsigned int irq;
136 DECLARE_BITMAP(blk, LGUEST_IRQS); 137 DECLARE_BITMAP(blk, LGUEST_IRQS);
137 struct desc_struct *idt;
138 138
139 /* If the Guest hasn't even initialized yet, we can do nothing. */ 139 /* If the Guest hasn't even initialized yet, we can do nothing. */
140 if (!cpu->lg->lguest_data) 140 if (!cpu->lg->lguest_data)
141 return; 141 return LGUEST_IRQS;
142 142
143 /* Take our "irqs_pending" array and remove any interrupts the Guest 143 /* Take our "irqs_pending" array and remove any interrupts the Guest
144 * wants blocked: the result ends up in "blk". */ 144 * wants blocked: the result ends up in "blk". */
145 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, 145 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
146 sizeof(blk))) 146 sizeof(blk)))
147 return; 147 return LGUEST_IRQS;
148 bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); 148 bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
149 149
150 /* Find the first interrupt. */ 150 /* Find the first interrupt. */
151 irq = find_first_bit(blk, LGUEST_IRQS); 151 irq = find_first_bit(blk, LGUEST_IRQS);
152 /* None? Nothing to do */ 152 *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
153 if (irq >= LGUEST_IRQS) 153
154 return; 154 return irq;
155}
156
157/* This actually diverts the Guest to running an interrupt handler, once an
158 * interrupt has been identified by interrupt_pending(). */
159void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
160{
161 struct desc_struct *idt;
162
163 BUG_ON(irq >= LGUEST_IRQS);
155 164
156 /* They may be in the middle of an iret, where they asked us never to 165 /* They may be in the middle of an iret, where they asked us never to
157 * deliver interrupts. */ 166 * deliver interrupts. */
@@ -170,8 +179,12 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
170 u32 irq_enabled; 179 u32 irq_enabled;
171 if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) 180 if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
172 irq_enabled = 0; 181 irq_enabled = 0;
173 if (!irq_enabled) 182 if (!irq_enabled) {
183 /* Make sure they know an IRQ is pending. */
184 put_user(X86_EFLAGS_IF,
185 &cpu->lg->lguest_data->irq_pending);
174 return; 186 return;
187 }
175 } 188 }
176 189
177 /* Look at the IDT entry the Guest gave us for this interrupt. The 190 /* Look at the IDT entry the Guest gave us for this interrupt. The
@@ -194,6 +207,25 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
194 * here is a compromise which means at least it gets updated every 207 * here is a compromise which means at least it gets updated every
195 * timer interrupt. */ 208 * timer interrupt. */
196 write_timestamp(cpu); 209 write_timestamp(cpu);
210
211 /* If there are no other interrupts we want to deliver, clear
212 * the pending flag. */
213 if (!more)
214 put_user(0, &cpu->lg->lguest_data->irq_pending);
215}
216
217/* And this is the routine when we want to set an interrupt for the Guest. */
218void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
219{
220 /* Next time the Guest runs, the core code will see if it can deliver
221 * this interrupt. */
222 set_bit(irq, cpu->irqs_pending);
223
224 /* Make sure it sees it; it might be asleep (eg. halted), or
225 * running the Guest right now, in which case kick_process()
226 * will knock it out. */
227 if (!wake_up_process(cpu->tsk))
228 kick_process(cpu->tsk);
197} 229}
198/*:*/ 230/*:*/
199 231
@@ -510,10 +542,7 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
510 struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); 542 struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
511 543
512 /* Remember the first interrupt is the timer interrupt. */ 544 /* Remember the first interrupt is the timer interrupt. */
513 set_bit(0, cpu->irqs_pending); 545 set_interrupt(cpu, 0);
514 /* If the Guest is actually stopped, we need to wake it up. */
515 if (cpu->halted)
516 wake_up_process(cpu->tsk);
517 return HRTIMER_NORESTART; 546 return HRTIMER_NORESTART;
518} 547}
519 548
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index af92a176697f..d4e8979735cb 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -49,7 +49,7 @@ struct lg_cpu {
49 u32 cr2; 49 u32 cr2;
50 int ts; 50 int ts;
51 u32 esp1; 51 u32 esp1;
52 u8 ss1; 52 u16 ss1;
53 53
54 /* Bitmap of what has changed: see CHANGED_* above. */ 54 /* Bitmap of what has changed: see CHANGED_* above. */
55 int changed; 55 int changed;
@@ -71,9 +71,7 @@ struct lg_cpu {
71 /* Virtual clock device */ 71 /* Virtual clock device */
72 struct hrtimer hrt; 72 struct hrtimer hrt;
73 73
74 /* Do we need to stop what we're doing and return to userspace? */ 74 /* Did the Guest tell us to halt? */
75 int break_out;
76 wait_queue_head_t break_wq;
77 int halted; 75 int halted;
78 76
79 /* Pending virtual interrupts */ 77 /* Pending virtual interrupts */
@@ -82,6 +80,16 @@ struct lg_cpu {
82 struct lg_cpu_arch arch; 80 struct lg_cpu_arch arch;
83}; 81};
84 82
83struct lg_eventfd {
84 unsigned long addr;
85 struct file *event;
86};
87
88struct lg_eventfd_map {
89 unsigned int num;
90 struct lg_eventfd map[];
91};
92
85/* The private info the thread maintains about the guest. */ 93/* The private info the thread maintains about the guest. */
86struct lguest 94struct lguest
87{ 95{
@@ -102,6 +110,8 @@ struct lguest
102 unsigned int stack_pages; 110 unsigned int stack_pages;
103 u32 tsc_khz; 111 u32 tsc_khz;
104 112
113 struct lg_eventfd_map *eventfds;
114
105 /* Dead? */ 115 /* Dead? */
106 const char *dead; 116 const char *dead;
107}; 117};
@@ -137,9 +147,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
137 * in the kernel. */ 147 * in the kernel. */
138#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) 148#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
139#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) 149#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
150#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
151#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
140 152
141/* interrupts_and_traps.c: */ 153/* interrupts_and_traps.c: */
142void maybe_do_interrupt(struct lg_cpu *cpu); 154unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
155void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
156void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
143bool deliver_trap(struct lg_cpu *cpu, unsigned int num); 157bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
144void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, 158void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
145 u32 low, u32 hi); 159 u32 low, u32 hi);
@@ -150,6 +164,7 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
150void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, 164void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
151 const unsigned long *def); 165 const unsigned long *def);
152void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta); 166void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
167bool send_notify_to_eventfd(struct lg_cpu *cpu);
153void init_clockdev(struct lg_cpu *cpu); 168void init_clockdev(struct lg_cpu *cpu);
154bool check_syscall_vector(struct lguest *lg); 169bool check_syscall_vector(struct lguest *lg);
155int init_interrupts(void); 170int init_interrupts(void);
@@ -168,7 +183,10 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
168int init_guest_pagetable(struct lguest *lg); 183int init_guest_pagetable(struct lguest *lg);
169void free_guest_pagetable(struct lguest *lg); 184void free_guest_pagetable(struct lguest *lg);
170void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); 185void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
186void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
187#ifdef CONFIG_X86_PAE
171void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); 188void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
189#endif
172void guest_pagetable_clear_all(struct lg_cpu *cpu); 190void guest_pagetable_clear_all(struct lg_cpu *cpu);
173void guest_pagetable_flush_user(struct lg_cpu *cpu); 191void guest_pagetable_flush_user(struct lg_cpu *cpu);
174void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, 192void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index df44d962626d..e082cdac88b4 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq);
228 * function. */ 228 * function. */
229static struct virtqueue *lg_find_vq(struct virtio_device *vdev, 229static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
230 unsigned index, 230 unsigned index,
231 void (*callback)(struct virtqueue *vq)) 231 void (*callback)(struct virtqueue *vq),
232 const char *name)
232{ 233{
233 struct lguest_device *ldev = to_lgdev(vdev); 234 struct lguest_device *ldev = to_lgdev(vdev);
234 struct lguest_vq_info *lvq; 235 struct lguest_vq_info *lvq;
@@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
263 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size 264 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size
264 * and we've got a pointer to its pages. */ 265 * and we've got a pointer to its pages. */
265 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, 266 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
266 vdev, lvq->pages, lg_notify, callback); 267 vdev, lvq->pages, lg_notify, callback, name);
267 if (!vq) { 268 if (!vq) {
268 err = -ENOMEM; 269 err = -ENOMEM;
269 goto unmap; 270 goto unmap;
@@ -312,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq)
312 kfree(lvq); 313 kfree(lvq);
313} 314}
314 315
316static void lg_del_vqs(struct virtio_device *vdev)
317{
318 struct virtqueue *vq, *n;
319
320 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
321 lg_del_vq(vq);
322}
323
324static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
325 struct virtqueue *vqs[],
326 vq_callback_t *callbacks[],
327 const char *names[])
328{
329 struct lguest_device *ldev = to_lgdev(vdev);
330 int i;
331
332 /* We must have this many virtqueues. */
333 if (nvqs > ldev->desc->num_vq)
334 return -ENOENT;
335
336 for (i = 0; i < nvqs; ++i) {
337 vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
338 if (IS_ERR(vqs[i]))
339 goto error;
340 }
341 return 0;
342
343error:
344 lg_del_vqs(vdev);
345 return PTR_ERR(vqs[i]);
346}
347
315/* The ops structure which hooks everything together. */ 348/* The ops structure which hooks everything together. */
316static struct virtio_config_ops lguest_config_ops = { 349static struct virtio_config_ops lguest_config_ops = {
317 .get_features = lg_get_features, 350 .get_features = lg_get_features,
@@ -321,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = {
321 .get_status = lg_get_status, 354 .get_status = lg_get_status,
322 .set_status = lg_set_status, 355 .set_status = lg_set_status,
323 .reset = lg_reset, 356 .reset = lg_reset,
324 .find_vq = lg_find_vq, 357 .find_vqs = lg_find_vqs,
325 .del_vq = lg_del_vq, 358 .del_vqs = lg_del_vqs,
326}; 359};
327 360
328/* The root device for the lguest virtio devices. This makes them appear as 361/* The root device for the lguest virtio devices. This makes them appear as
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b8ee103eed5f..32e297121058 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -7,32 +7,83 @@
7#include <linux/miscdevice.h> 7#include <linux/miscdevice.h>
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/eventfd.h>
11#include <linux/file.h>
10#include "lg.h" 12#include "lg.h"
11 13
12/*L:055 When something happens, the Waker process needs a way to stop the 14bool send_notify_to_eventfd(struct lg_cpu *cpu)
13 * kernel running the Guest and return to the Launcher. So the Waker writes
14 * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher
15 * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
16 * the Waker. */
17static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input)
18{ 15{
19 unsigned long on; 16 unsigned int i;
17 struct lg_eventfd_map *map;
18
19 /* lg->eventfds is RCU-protected */
20 rcu_read_lock();
21 map = rcu_dereference(cpu->lg->eventfds);
22 for (i = 0; i < map->num; i++) {
23 if (map->map[i].addr == cpu->pending_notify) {
24 eventfd_signal(map->map[i].event, 1);
25 cpu->pending_notify = 0;
26 break;
27 }
28 }
29 rcu_read_unlock();
30 return cpu->pending_notify == 0;
31}
20 32
21 /* Fetch whether they're turning break on or off. */ 33static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
22 if (get_user(on, input) != 0) 34{
23 return -EFAULT; 35 struct lg_eventfd_map *new, *old = lg->eventfds;
24 36
25 if (on) { 37 if (!addr)
26 cpu->break_out = 1; 38 return -EINVAL;
27 /* Pop it out of the Guest (may be running on different CPU) */ 39
28 wake_up_process(cpu->tsk); 40 /* Replace the old array with the new one, carefully: others can
29 /* Wait for them to reset it */ 41 * be accessing it at the same time */
30 return wait_event_interruptible(cpu->break_wq, !cpu->break_out); 42 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
31 } else { 43 GFP_KERNEL);
32 cpu->break_out = 0; 44 if (!new)
33 wake_up(&cpu->break_wq); 45 return -ENOMEM;
34 return 0; 46
47 /* First make identical copy. */
48 memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
49 new->num = old->num;
50
51 /* Now append new entry. */
52 new->map[new->num].addr = addr;
53 new->map[new->num].event = eventfd_fget(fd);
54 if (IS_ERR(new->map[new->num].event)) {
55 kfree(new);
56 return PTR_ERR(new->map[new->num].event);
35 } 57 }
58 new->num++;
59
60 /* Now put new one in place. */
61 rcu_assign_pointer(lg->eventfds, new);
62
63 /* We're not in a big hurry. Wait until noone's looking at old
64 * version, then delete it. */
65 synchronize_rcu();
66 kfree(old);
67
68 return 0;
69}
70
71static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
72{
73 unsigned long addr, fd;
74 int err;
75
76 if (get_user(addr, input) != 0)
77 return -EFAULT;
78 input++;
79 if (get_user(fd, input) != 0)
80 return -EFAULT;
81
82 mutex_lock(&lguest_lock);
83 err = add_eventfd(lg, addr, fd);
84 mutex_unlock(&lguest_lock);
85
86 return 0;
36} 87}
37 88
38/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt 89/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
@@ -45,9 +96,8 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
45 return -EFAULT; 96 return -EFAULT;
46 if (irq >= LGUEST_IRQS) 97 if (irq >= LGUEST_IRQS)
47 return -EINVAL; 98 return -EINVAL;
48 /* Next time the Guest runs, the core code will see if it can deliver 99
49 * this interrupt. */ 100 set_interrupt(cpu, irq);
50 set_bit(irq, cpu->irqs_pending);
51 return 0; 101 return 0;
52} 102}
53 103
@@ -126,9 +176,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
126 * address. */ 176 * address. */
127 lguest_arch_setup_regs(cpu, start_ip); 177 lguest_arch_setup_regs(cpu, start_ip);
128 178
129 /* Initialize the queue for the Waker to wait on */
130 init_waitqueue_head(&cpu->break_wq);
131
132 /* We keep a pointer to the Launcher task (ie. current task) for when 179 /* We keep a pointer to the Launcher task (ie. current task) for when
133 * other Guests want to wake this one (eg. console input). */ 180 * other Guests want to wake this one (eg. console input). */
134 cpu->tsk = current; 181 cpu->tsk = current;
@@ -185,6 +232,13 @@ static int initialize(struct file *file, const unsigned long __user *input)
185 goto unlock; 232 goto unlock;
186 } 233 }
187 234
235 lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
236 if (!lg->eventfds) {
237 err = -ENOMEM;
238 goto free_lg;
239 }
240 lg->eventfds->num = 0;
241
188 /* Populate the easy fields of our "struct lguest" */ 242 /* Populate the easy fields of our "struct lguest" */
189 lg->mem_base = (void __user *)args[0]; 243 lg->mem_base = (void __user *)args[0];
190 lg->pfn_limit = args[1]; 244 lg->pfn_limit = args[1];
@@ -192,7 +246,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
192 /* This is the first cpu (cpu 0) and it will start booting at args[2] */ 246 /* This is the first cpu (cpu 0) and it will start booting at args[2] */
193 err = lg_cpu_start(&lg->cpus[0], 0, args[2]); 247 err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
194 if (err) 248 if (err)
195 goto release_guest; 249 goto free_eventfds;
196 250
197 /* Initialize the Guest's shadow page tables, using the toplevel 251 /* Initialize the Guest's shadow page tables, using the toplevel
198 * address the Launcher gave us. This allocates memory, so can fail. */ 252 * address the Launcher gave us. This allocates memory, so can fail. */
@@ -211,7 +265,9 @@ static int initialize(struct file *file, const unsigned long __user *input)
211free_regs: 265free_regs:
212 /* FIXME: This should be in free_vcpu */ 266 /* FIXME: This should be in free_vcpu */
213 free_page(lg->cpus[0].regs_page); 267 free_page(lg->cpus[0].regs_page);
214release_guest: 268free_eventfds:
269 kfree(lg->eventfds);
270free_lg:
215 kfree(lg); 271 kfree(lg);
216unlock: 272unlock:
217 mutex_unlock(&lguest_lock); 273 mutex_unlock(&lguest_lock);
@@ -252,11 +308,6 @@ static ssize_t write(struct file *file, const char __user *in,
252 /* Once the Guest is dead, you can only read() why it died. */ 308 /* Once the Guest is dead, you can only read() why it died. */
253 if (lg->dead) 309 if (lg->dead)
254 return -ENOENT; 310 return -ENOENT;
255
256 /* If you're not the task which owns the Guest, all you can do
257 * is break the Launcher out of running the Guest. */
258 if (current != cpu->tsk && req != LHREQ_BREAK)
259 return -EPERM;
260 } 311 }
261 312
262 switch (req) { 313 switch (req) {
@@ -264,8 +315,8 @@ static ssize_t write(struct file *file, const char __user *in,
264 return initialize(file, input); 315 return initialize(file, input);
265 case LHREQ_IRQ: 316 case LHREQ_IRQ:
266 return user_send_irq(cpu, input); 317 return user_send_irq(cpu, input);
267 case LHREQ_BREAK: 318 case LHREQ_EVENTFD:
268 return break_guest_out(cpu, input); 319 return attach_eventfd(lg, input);
269 default: 320 default:
270 return -EINVAL; 321 return -EINVAL;
271 } 322 }
@@ -303,6 +354,12 @@ static int close(struct inode *inode, struct file *file)
303 * the Launcher's memory management structure. */ 354 * the Launcher's memory management structure. */
304 mmput(lg->cpus[i].mm); 355 mmput(lg->cpus[i].mm);
305 } 356 }
357
358 /* Release any eventfds they registered. */
359 for (i = 0; i < lg->eventfds->num; i++)
360 fput(lg->eventfds->map[i].event);
361 kfree(lg->eventfds);
362
306 /* If lg->dead doesn't contain an error code it will be NULL or a 363 /* If lg->dead doesn't contain an error code it will be NULL or a
307 * kmalloc()ed string, either of which is ok to hand to kfree(). */ 364 * kmalloc()ed string, either of which is ok to hand to kfree(). */
308 if (!IS_ERR(lg->dead)) 365 if (!IS_ERR(lg->dead))
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a059cf9980f7..a6fe1abda240 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -53,6 +53,17 @@
53 * page. */ 53 * page. */
54#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) 54#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
55 55
56/* For PAE we need the PMD index as well. We use the last 2MB, so we
57 * will need the last pmd entry of the last pmd page. */
58#ifdef CONFIG_X86_PAE
59#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
60#define RESERVE_MEM 2U
61#define CHECK_GPGD_MASK _PAGE_PRESENT
62#else
63#define RESERVE_MEM 4U
64#define CHECK_GPGD_MASK _PAGE_TABLE
65#endif
66
56/* We actually need a separate PTE page for each CPU. Remember that after the 67/* We actually need a separate PTE page for each CPU. Remember that after the
57 * Switcher code itself comes two pages for each CPU, and we don't want this 68 * Switcher code itself comes two pages for each CPU, and we don't want this
58 * CPU's guest to see the pages of any other CPU. */ 69 * CPU's guest to see the pages of any other CPU. */
@@ -73,24 +84,59 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
73{ 84{
74 unsigned int index = pgd_index(vaddr); 85 unsigned int index = pgd_index(vaddr);
75 86
87#ifndef CONFIG_X86_PAE
76 /* We kill any Guest trying to touch the Switcher addresses. */ 88 /* We kill any Guest trying to touch the Switcher addresses. */
77 if (index >= SWITCHER_PGD_INDEX) { 89 if (index >= SWITCHER_PGD_INDEX) {
78 kill_guest(cpu, "attempt to access switcher pages"); 90 kill_guest(cpu, "attempt to access switcher pages");
79 index = 0; 91 index = 0;
80 } 92 }
93#endif
81 /* Return a pointer index'th pgd entry for the i'th page table. */ 94 /* Return a pointer index'th pgd entry for the i'th page table. */
82 return &cpu->lg->pgdirs[i].pgdir[index]; 95 return &cpu->lg->pgdirs[i].pgdir[index];
83} 96}
84 97
98#ifdef CONFIG_X86_PAE
99/* This routine then takes the PGD entry given above, which contains the
100 * address of the PMD page. It then returns a pointer to the PMD entry for the
101 * given address. */
102static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
103{
104 unsigned int index = pmd_index(vaddr);
105 pmd_t *page;
106
107 /* We kill any Guest trying to touch the Switcher addresses. */
108 if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
109 index >= SWITCHER_PMD_INDEX) {
110 kill_guest(cpu, "attempt to access switcher pages");
111 index = 0;
112 }
113
114 /* You should never call this if the PGD entry wasn't valid */
115 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
116 page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
117
118 return &page[index];
119}
120#endif
121
85/* This routine then takes the page directory entry returned above, which 122/* This routine then takes the page directory entry returned above, which
86 * contains the address of the page table entry (PTE) page. It then returns a 123 * contains the address of the page table entry (PTE) page. It then returns a
87 * pointer to the PTE entry for the given address. */ 124 * pointer to the PTE entry for the given address. */
88static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr) 125static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
89{ 126{
127#ifdef CONFIG_X86_PAE
128 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
129 pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
130
131 /* You should never call this if the PMD entry wasn't valid */
132 BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
133#else
90 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); 134 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
91 /* You should never call this if the PGD entry wasn't valid */ 135 /* You should never call this if the PGD entry wasn't valid */
92 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); 136 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
93 return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE]; 137#endif
138
139 return &page[pte_index(vaddr)];
94} 140}
95 141
96/* These two functions just like the above two, except they access the Guest 142/* These two functions just like the above two, except they access the Guest
@@ -101,12 +147,32 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
101 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); 147 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
102} 148}
103 149
104static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) 150#ifdef CONFIG_X86_PAE
151static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
152{
153 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
154 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
155 return gpage + pmd_index(vaddr) * sizeof(pmd_t);
156}
157
158static unsigned long gpte_addr(struct lg_cpu *cpu,
159 pmd_t gpmd, unsigned long vaddr)
160{
161 unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
162
163 BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
164 return gpage + pte_index(vaddr) * sizeof(pte_t);
165}
166#else
167static unsigned long gpte_addr(struct lg_cpu *cpu,
168 pgd_t gpgd, unsigned long vaddr)
105{ 169{
106 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; 170 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
171
107 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); 172 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
108 return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t); 173 return gpage + pte_index(vaddr) * sizeof(pte_t);
109} 174}
175#endif
110/*:*/ 176/*:*/
111 177
112/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as 178/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
@@ -171,7 +237,7 @@ static void release_pte(pte_t pte)
171 /* Remember that get_user_pages_fast() took a reference to the page, in 237 /* Remember that get_user_pages_fast() took a reference to the page, in
172 * get_pfn()? We have to put it back now. */ 238 * get_pfn()? We have to put it back now. */
173 if (pte_flags(pte) & _PAGE_PRESENT) 239 if (pte_flags(pte) & _PAGE_PRESENT)
174 put_page(pfn_to_page(pte_pfn(pte))); 240 put_page(pte_page(pte));
175} 241}
176/*:*/ 242/*:*/
177 243
@@ -184,11 +250,20 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
184 250
185static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) 251static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
186{ 252{
187 if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || 253 if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
188 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) 254 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
189 kill_guest(cpu, "bad page directory entry"); 255 kill_guest(cpu, "bad page directory entry");
190} 256}
191 257
258#ifdef CONFIG_X86_PAE
259static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
260{
261 if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
262 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
263 kill_guest(cpu, "bad page middle directory entry");
264}
265#endif
266
192/*H:330 267/*H:330
193 * (i) Looking up a page table entry when the Guest faults. 268 * (i) Looking up a page table entry when the Guest faults.
194 * 269 *
@@ -207,6 +282,11 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
207 pte_t gpte; 282 pte_t gpte;
208 pte_t *spte; 283 pte_t *spte;
209 284
285#ifdef CONFIG_X86_PAE
286 pmd_t *spmd;
287 pmd_t gpmd;
288#endif
289
210 /* First step: get the top-level Guest page table entry. */ 290 /* First step: get the top-level Guest page table entry. */
211 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 291 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
212 /* Toplevel not present? We can't map it in. */ 292 /* Toplevel not present? We can't map it in. */
@@ -228,12 +308,45 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
228 check_gpgd(cpu, gpgd); 308 check_gpgd(cpu, gpgd);
229 /* And we copy the flags to the shadow PGD entry. The page 309 /* And we copy the flags to the shadow PGD entry. The page
230 * number in the shadow PGD is the page we just allocated. */ 310 * number in the shadow PGD is the page we just allocated. */
231 *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); 311 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
232 } 312 }
233 313
314#ifdef CONFIG_X86_PAE
315 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
316 /* middle level not present? We can't map it in. */
317 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
318 return false;
319
320 /* Now look at the matching shadow entry. */
321 spmd = spmd_addr(cpu, *spgd, vaddr);
322
323 if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
324 /* No shadow entry: allocate a new shadow PTE page. */
325 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
326
327 /* This is not really the Guest's fault, but killing it is
328 * simple for this corner case. */
329 if (!ptepage) {
330 kill_guest(cpu, "out of memory allocating pte page");
331 return false;
332 }
333
334 /* We check that the Guest pmd is OK. */
335 check_gpmd(cpu, gpmd);
336
337 /* And we copy the flags to the shadow PMD entry. The page
338 * number in the shadow PMD is the page we just allocated. */
339 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
340 }
341
342 /* OK, now we look at the lower level in the Guest page table: keep its
343 * address, because we might update it later. */
344 gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
345#else
234 /* OK, now we look at the lower level in the Guest page table: keep its 346 /* OK, now we look at the lower level in the Guest page table: keep its
235 * address, because we might update it later. */ 347 * address, because we might update it later. */
236 gpte_ptr = gpte_addr(gpgd, vaddr); 348 gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
349#endif
237 gpte = lgread(cpu, gpte_ptr, pte_t); 350 gpte = lgread(cpu, gpte_ptr, pte_t);
238 351
239 /* If this page isn't in the Guest page tables, we can't page it in. */ 352 /* If this page isn't in the Guest page tables, we can't page it in. */
@@ -259,7 +372,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
259 gpte = pte_mkdirty(gpte); 372 gpte = pte_mkdirty(gpte);
260 373
261 /* Get the pointer to the shadow PTE entry we're going to set. */ 374 /* Get the pointer to the shadow PTE entry we're going to set. */
262 spte = spte_addr(*spgd, vaddr); 375 spte = spte_addr(cpu, *spgd, vaddr);
263 /* If there was a valid shadow PTE entry here before, we release it. 376 /* If there was a valid shadow PTE entry here before, we release it.
264 * This can happen with a write to a previously read-only entry. */ 377 * This can happen with a write to a previously read-only entry. */
265 release_pte(*spte); 378 release_pte(*spte);
@@ -273,7 +386,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
273 * table entry, even if the Guest says it's writable. That way 386 * table entry, even if the Guest says it's writable. That way
274 * we will come back here when a write does actually occur, so 387 * we will come back here when a write does actually occur, so
275 * we can update the Guest's _PAGE_DIRTY flag. */ 388 * we can update the Guest's _PAGE_DIRTY flag. */
276 *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0); 389 native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
277 390
278 /* Finally, we write the Guest PTE entry back: we've set the 391 /* Finally, we write the Guest PTE entry back: we've set the
279 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ 392 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -301,14 +414,23 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
301 pgd_t *spgd; 414 pgd_t *spgd;
302 unsigned long flags; 415 unsigned long flags;
303 416
417#ifdef CONFIG_X86_PAE
418 pmd_t *spmd;
419#endif
304 /* Look at the current top level entry: is it present? */ 420 /* Look at the current top level entry: is it present? */
305 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); 421 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
306 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) 422 if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
307 return false; 423 return false;
308 424
425#ifdef CONFIG_X86_PAE
426 spmd = spmd_addr(cpu, *spgd, vaddr);
427 if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
428 return false;
429#endif
430
309 /* Check the flags on the pte entry itself: it must be present and 431 /* Check the flags on the pte entry itself: it must be present and
310 * writable. */ 432 * writable. */
311 flags = pte_flags(*(spte_addr(*spgd, vaddr))); 433 flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
312 434
313 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); 435 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
314} 436}
@@ -322,8 +444,43 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
322 kill_guest(cpu, "bad stack page %#lx", vaddr); 444 kill_guest(cpu, "bad stack page %#lx", vaddr);
323} 445}
324 446
447#ifdef CONFIG_X86_PAE
448static void release_pmd(pmd_t *spmd)
449{
450 /* If the entry's not present, there's nothing to release. */
451 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
452 unsigned int i;
453 pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
454 /* For each entry in the page, we might need to release it. */
455 for (i = 0; i < PTRS_PER_PTE; i++)
456 release_pte(ptepage[i]);
457 /* Now we can free the page of PTEs */
458 free_page((long)ptepage);
459 /* And zero out the PMD entry so we never release it twice. */
460 native_set_pmd(spmd, __pmd(0));
461 }
462}
463
464static void release_pgd(pgd_t *spgd)
465{
466 /* If the entry's not present, there's nothing to release. */
467 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
468 unsigned int i;
469 pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
470
471 for (i = 0; i < PTRS_PER_PMD; i++)
472 release_pmd(&pmdpage[i]);
473
474 /* Now we can free the page of PMDs */
475 free_page((long)pmdpage);
476 /* And zero out the PGD entry so we never release it twice. */
477 set_pgd(spgd, __pgd(0));
478 }
479}
480
481#else /* !CONFIG_X86_PAE */
325/*H:450 If we chase down the release_pgd() code, it looks like this: */ 482/*H:450 If we chase down the release_pgd() code, it looks like this: */
326static void release_pgd(struct lguest *lg, pgd_t *spgd) 483static void release_pgd(pgd_t *spgd)
327{ 484{
328 /* If the entry's not present, there's nothing to release. */ 485 /* If the entry's not present, there's nothing to release. */
329 if (pgd_flags(*spgd) & _PAGE_PRESENT) { 486 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -341,7 +498,7 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd)
341 *spgd = __pgd(0); 498 *spgd = __pgd(0);
342 } 499 }
343} 500}
344 501#endif
345/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() 502/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
346 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. 503 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
347 * It simply releases every PTE page from 0 up to the Guest's kernel address. */ 504 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -350,7 +507,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
350 unsigned int i; 507 unsigned int i;
351 /* Release every pgd entry up to the kernel's address. */ 508 /* Release every pgd entry up to the kernel's address. */
352 for (i = 0; i < pgd_index(lg->kernel_address); i++) 509 for (i = 0; i < pgd_index(lg->kernel_address); i++)
353 release_pgd(lg, lg->pgdirs[idx].pgdir + i); 510 release_pgd(lg->pgdirs[idx].pgdir + i);
354} 511}
355 512
356/*H:440 (v) Flushing (throwing away) page tables, 513/*H:440 (v) Flushing (throwing away) page tables,
@@ -369,7 +526,9 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
369{ 526{
370 pgd_t gpgd; 527 pgd_t gpgd;
371 pte_t gpte; 528 pte_t gpte;
372 529#ifdef CONFIG_X86_PAE
530 pmd_t gpmd;
531#endif
373 /* First step: get the top-level Guest page table entry. */ 532 /* First step: get the top-level Guest page table entry. */
374 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 533 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
375 /* Toplevel not present? We can't map it in. */ 534 /* Toplevel not present? We can't map it in. */
@@ -378,7 +537,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
378 return -1UL; 537 return -1UL;
379 } 538 }
380 539
381 gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t); 540#ifdef CONFIG_X86_PAE
541 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
542 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
543 kill_guest(cpu, "Bad address %#lx", vaddr);
544 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
545#else
546 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
547#endif
382 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 548 if (!(pte_flags(gpte) & _PAGE_PRESENT))
383 kill_guest(cpu, "Bad address %#lx", vaddr); 549 kill_guest(cpu, "Bad address %#lx", vaddr);
384 550
@@ -405,6 +571,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
405 int *blank_pgdir) 571 int *blank_pgdir)
406{ 572{
407 unsigned int next; 573 unsigned int next;
574#ifdef CONFIG_X86_PAE
575 pmd_t *pmd_table;
576#endif
408 577
409 /* We pick one entry at random to throw out. Choosing the Least 578 /* We pick one entry at random to throw out. Choosing the Least
410 * Recently Used might be better, but this is easy. */ 579 * Recently Used might be better, but this is easy. */
@@ -416,10 +585,27 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
416 /* If the allocation fails, just keep using the one we have */ 585 /* If the allocation fails, just keep using the one we have */
417 if (!cpu->lg->pgdirs[next].pgdir) 586 if (!cpu->lg->pgdirs[next].pgdir)
418 next = cpu->cpu_pgd; 587 next = cpu->cpu_pgd;
419 else 588 else {
420 /* This is a blank page, so there are no kernel 589#ifdef CONFIG_X86_PAE
421 * mappings: caller must map the stack! */ 590 /* In PAE mode, allocate a pmd page and populate the
591 * last pgd entry. */
592 pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
593 if (!pmd_table) {
594 free_page((long)cpu->lg->pgdirs[next].pgdir);
595 set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
596 next = cpu->cpu_pgd;
597 } else {
598 set_pgd(cpu->lg->pgdirs[next].pgdir +
599 SWITCHER_PGD_INDEX,
600 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
601 /* This is a blank page, so there are no kernel
602 * mappings: caller must map the stack! */
603 *blank_pgdir = 1;
604 }
605#else
422 *blank_pgdir = 1; 606 *blank_pgdir = 1;
607#endif
608 }
423 } 609 }
424 /* Record which Guest toplevel this shadows. */ 610 /* Record which Guest toplevel this shadows. */
425 cpu->lg->pgdirs[next].gpgdir = gpgdir; 611 cpu->lg->pgdirs[next].gpgdir = gpgdir;
@@ -431,7 +617,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
431 617
432/*H:430 (iv) Switching page tables 618/*H:430 (iv) Switching page tables
433 * 619 *
434 * Now we've seen all the page table setting and manipulation, let's see what 620 * Now we've seen all the page table setting and manipulation, let's see
435 * what happens when the Guest changes page tables (ie. changes the top-level 621 * what happens when the Guest changes page tables (ie. changes the top-level
436 * pgdir). This occurs on almost every context switch. */ 622 * pgdir). This occurs on almost every context switch. */
437void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) 623void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -460,10 +646,25 @@ static void release_all_pagetables(struct lguest *lg)
460 646
461 /* Every shadow pagetable this Guest has */ 647 /* Every shadow pagetable this Guest has */
462 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) 648 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
463 if (lg->pgdirs[i].pgdir) 649 if (lg->pgdirs[i].pgdir) {
650#ifdef CONFIG_X86_PAE
651 pgd_t *spgd;
652 pmd_t *pmdpage;
653 unsigned int k;
654
655 /* Get the last pmd page. */
656 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
657 pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
658
659 /* And release the pmd entries of that pmd page,
660 * except for the switcher pmd. */
661 for (k = 0; k < SWITCHER_PMD_INDEX; k++)
662 release_pmd(&pmdpage[k]);
663#endif
464 /* Every PGD entry except the Switcher at the top */ 664 /* Every PGD entry except the Switcher at the top */
465 for (j = 0; j < SWITCHER_PGD_INDEX; j++) 665 for (j = 0; j < SWITCHER_PGD_INDEX; j++)
466 release_pgd(lg, lg->pgdirs[i].pgdir + j); 666 release_pgd(lg->pgdirs[i].pgdir + j);
667 }
467} 668}
468 669
469/* We also throw away everything when a Guest tells us it's changed a kernel 670/* We also throw away everything when a Guest tells us it's changed a kernel
@@ -504,24 +705,37 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
504{ 705{
505 /* Look up the matching shadow page directory entry. */ 706 /* Look up the matching shadow page directory entry. */
506 pgd_t *spgd = spgd_addr(cpu, idx, vaddr); 707 pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
708#ifdef CONFIG_X86_PAE
709 pmd_t *spmd;
710#endif
507 711
508 /* If the top level isn't present, there's no entry to update. */ 712 /* If the top level isn't present, there's no entry to update. */
509 if (pgd_flags(*spgd) & _PAGE_PRESENT) { 713 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
510 /* Otherwise, we start by releasing the existing entry. */ 714#ifdef CONFIG_X86_PAE
511 pte_t *spte = spte_addr(*spgd, vaddr); 715 spmd = spmd_addr(cpu, *spgd, vaddr);
512 release_pte(*spte); 716 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
513 717#endif
514 /* If they're setting this entry as dirty or accessed, we might 718 /* Otherwise, we start by releasing
515 * as well put that entry they've given us in now. This shaves 719 * the existing entry. */
516 * 10% off a copy-on-write micro-benchmark. */ 720 pte_t *spte = spte_addr(cpu, *spgd, vaddr);
517 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 721 release_pte(*spte);
518 check_gpte(cpu, gpte); 722
519 *spte = gpte_to_spte(cpu, gpte, 723 /* If they're setting this entry as dirty or accessed,
520 pte_flags(gpte) & _PAGE_DIRTY); 724 * we might as well put that entry they've given us
521 } else 725 * in now. This shaves 10% off a
522 /* Otherwise kill it and we can demand_page() it in 726 * copy-on-write micro-benchmark. */
523 * later. */ 727 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
524 *spte = __pte(0); 728 check_gpte(cpu, gpte);
729 native_set_pte(spte,
730 gpte_to_spte(cpu, gpte,
731 pte_flags(gpte) & _PAGE_DIRTY));
732 } else
733 /* Otherwise kill it and we can demand_page()
734 * it in later. */
735 native_set_pte(spte, __pte(0));
736#ifdef CONFIG_X86_PAE
737 }
738#endif
525 } 739 }
526} 740}
527 741
@@ -568,12 +782,10 @@ void guest_set_pte(struct lg_cpu *cpu,
568 * 782 *
569 * So with that in mind here's our code to to update a (top-level) PGD entry: 783 * So with that in mind here's our code to to update a (top-level) PGD entry:
570 */ 784 */
571void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) 785void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
572{ 786{
573 int pgdir; 787 int pgdir;
574 788
575 /* The kernel seems to try to initialize this early on: we ignore its
576 * attempts to map over the Switcher. */
577 if (idx >= SWITCHER_PGD_INDEX) 789 if (idx >= SWITCHER_PGD_INDEX)
578 return; 790 return;
579 791
@@ -581,8 +793,14 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
581 pgdir = find_pgdir(lg, gpgdir); 793 pgdir = find_pgdir(lg, gpgdir);
582 if (pgdir < ARRAY_SIZE(lg->pgdirs)) 794 if (pgdir < ARRAY_SIZE(lg->pgdirs))
583 /* ... throw it away. */ 795 /* ... throw it away. */
584 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); 796 release_pgd(lg->pgdirs[pgdir].pgdir + idx);
585} 797}
798#ifdef CONFIG_X86_PAE
799void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
800{
801 guest_pagetable_clear_all(&lg->cpus[0]);
802}
803#endif
586 804
587/* Once we know how much memory we have we can construct simple identity 805/* Once we know how much memory we have we can construct simple identity
588 * (which set virtual == physical) and linear mappings 806 * (which set virtual == physical) and linear mappings
@@ -596,8 +814,16 @@ static unsigned long setup_pagetables(struct lguest *lg,
596{ 814{
597 pgd_t __user *pgdir; 815 pgd_t __user *pgdir;
598 pte_t __user *linear; 816 pte_t __user *linear;
599 unsigned int mapped_pages, i, linear_pages, phys_linear;
600 unsigned long mem_base = (unsigned long)lg->mem_base; 817 unsigned long mem_base = (unsigned long)lg->mem_base;
818 unsigned int mapped_pages, i, linear_pages;
819#ifdef CONFIG_X86_PAE
820 pmd_t __user *pmds;
821 unsigned int j;
822 pgd_t pgd;
823 pmd_t pmd;
824#else
825 unsigned int phys_linear;
826#endif
601 827
602 /* We have mapped_pages frames to map, so we need 828 /* We have mapped_pages frames to map, so we need
603 * linear_pages page tables to map them. */ 829 * linear_pages page tables to map them. */
@@ -610,6 +836,9 @@ static unsigned long setup_pagetables(struct lguest *lg,
610 /* Now we use the next linear_pages pages as pte pages */ 836 /* Now we use the next linear_pages pages as pte pages */
611 linear = (void *)pgdir - linear_pages * PAGE_SIZE; 837 linear = (void *)pgdir - linear_pages * PAGE_SIZE;
612 838
839#ifdef CONFIG_X86_PAE
840 pmds = (void *)linear - PAGE_SIZE;
841#endif
613 /* Linear mapping is easy: put every page's address into the 842 /* Linear mapping is easy: put every page's address into the
614 * mapping in order. */ 843 * mapping in order. */
615 for (i = 0; i < mapped_pages; i++) { 844 for (i = 0; i < mapped_pages; i++) {
@@ -621,6 +850,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
621 850
622 /* The top level points to the linear page table pages above. 851 /* The top level points to the linear page table pages above.
623 * We setup the identity and linear mappings here. */ 852 * We setup the identity and linear mappings here. */
853#ifdef CONFIG_X86_PAE
854 for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
855 i += PTRS_PER_PTE, j++) {
856 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
857 - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
858
859 if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
860 return -EFAULT;
861 }
862
863 set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
864 if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
865 return -EFAULT;
866 if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
867 return -EFAULT;
868#else
624 phys_linear = (unsigned long)linear - mem_base; 869 phys_linear = (unsigned long)linear - mem_base;
625 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { 870 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
626 pgd_t pgd; 871 pgd_t pgd;
@@ -633,6 +878,7 @@ static unsigned long setup_pagetables(struct lguest *lg,
633 &pgd, sizeof(pgd))) 878 &pgd, sizeof(pgd)))
634 return -EFAULT; 879 return -EFAULT;
635 } 880 }
881#endif
636 882
637 /* We return the top level (guest-physical) address: remember where 883 /* We return the top level (guest-physical) address: remember where
638 * this is. */ 884 * this is. */
@@ -648,7 +894,10 @@ int init_guest_pagetable(struct lguest *lg)
648 u64 mem; 894 u64 mem;
649 u32 initrd_size; 895 u32 initrd_size;
650 struct boot_params __user *boot = (struct boot_params *)lg->mem_base; 896 struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
651 897#ifdef CONFIG_X86_PAE
898 pgd_t *pgd;
899 pmd_t *pmd_table;
900#endif
652 /* Get the Guest memory size and the ramdisk size from the boot header 901 /* Get the Guest memory size and the ramdisk size from the boot header
653 * located at lg->mem_base (Guest address 0). */ 902 * located at lg->mem_base (Guest address 0). */
654 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) 903 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
@@ -663,6 +912,15 @@ int init_guest_pagetable(struct lguest *lg)
663 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); 912 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
664 if (!lg->pgdirs[0].pgdir) 913 if (!lg->pgdirs[0].pgdir)
665 return -ENOMEM; 914 return -ENOMEM;
915#ifdef CONFIG_X86_PAE
916 pgd = lg->pgdirs[0].pgdir;
917 pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
918 if (!pmd_table)
919 return -ENOMEM;
920
921 set_pgd(pgd + SWITCHER_PGD_INDEX,
922 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
923#endif
666 lg->cpus[0].cpu_pgd = 0; 924 lg->cpus[0].cpu_pgd = 0;
667 return 0; 925 return 0;
668} 926}
@@ -672,17 +930,24 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
672{ 930{
673 /* We get the kernel address: above this is all kernel memory. */ 931 /* We get the kernel address: above this is all kernel memory. */
674 if (get_user(cpu->lg->kernel_address, 932 if (get_user(cpu->lg->kernel_address,
675 &cpu->lg->lguest_data->kernel_address) 933 &cpu->lg->lguest_data->kernel_address)
676 /* We tell the Guest that it can't use the top 4MB of virtual 934 /* We tell the Guest that it can't use the top 2 or 4 MB
677 * addresses used by the Switcher. */ 935 * of virtual addresses used by the Switcher. */
678 || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem) 936 || put_user(RESERVE_MEM * 1024 * 1024,
679 || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir)) 937 &cpu->lg->lguest_data->reserve_mem)
938 || put_user(cpu->lg->pgdirs[0].gpgdir,
939 &cpu->lg->lguest_data->pgdir))
680 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); 940 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
681 941
682 /* In flush_user_mappings() we loop from 0 to 942 /* In flush_user_mappings() we loop from 0 to
683 * "pgd_index(lg->kernel_address)". This assumes it won't hit the 943 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
684 * Switcher mappings, so check that now. */ 944 * Switcher mappings, so check that now. */
945#ifdef CONFIG_X86_PAE
946 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
947 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
948#else
685 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX) 949 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
950#endif
686 kill_guest(cpu, "bad kernel address %#lx", 951 kill_guest(cpu, "bad kernel address %#lx",
687 cpu->lg->kernel_address); 952 cpu->lg->kernel_address);
688} 953}
@@ -708,16 +973,30 @@ void free_guest_pagetable(struct lguest *lg)
708void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) 973void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
709{ 974{
710 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); 975 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
711 pgd_t switcher_pgd;
712 pte_t regs_pte; 976 pte_t regs_pte;
713 unsigned long pfn; 977 unsigned long pfn;
714 978
979#ifdef CONFIG_X86_PAE
980 pmd_t switcher_pmd;
981 pmd_t *pmd_table;
982
983 native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
984 PAGE_SHIFT, PAGE_KERNEL_EXEC));
985
986 pmd_table = __va(pgd_pfn(cpu->lg->
987 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
988 << PAGE_SHIFT);
989 native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
990#else
991 pgd_t switcher_pgd;
992
715 /* Make the last PGD entry for this Guest point to the Switcher's PTE 993 /* Make the last PGD entry for this Guest point to the Switcher's PTE
716 * page for this CPU (with appropriate flags). */ 994 * page for this CPU (with appropriate flags). */
717 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL); 995 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
718 996
719 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; 997 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
720 998
999#endif
721 /* We also change the Switcher PTE page. When we're running the Guest, 1000 /* We also change the Switcher PTE page. When we're running the Guest,
722 * we want the Guest's "regs" page to appear where the first Switcher 1001 * we want the Guest's "regs" page to appear where the first Switcher
723 * page for this CPU is. This is an optimization: when the Switcher 1002 * page for this CPU is. This is an optimization: when the Switcher
@@ -726,8 +1005,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
726 * page is already mapped there, we don't have to copy them out 1005 * page is already mapped there, we don't have to copy them out
727 * again. */ 1006 * again. */
728 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; 1007 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
729 regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); 1008 native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
730 switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; 1009 native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
1010 regs_pte);
731} 1011}
732/*:*/ 1012/*:*/
733 1013
@@ -752,21 +1032,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
752 1032
753 /* The first entries are easy: they map the Switcher code. */ 1033 /* The first entries are easy: they map the Switcher code. */
754 for (i = 0; i < pages; i++) { 1034 for (i = 0; i < pages; i++) {
755 pte[i] = mk_pte(switcher_page[i], 1035 native_set_pte(&pte[i], mk_pte(switcher_page[i],
756 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); 1036 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
757 } 1037 }
758 1038
759 /* The only other thing we map is this CPU's pair of pages. */ 1039 /* The only other thing we map is this CPU's pair of pages. */
760 i = pages + cpu*2; 1040 i = pages + cpu*2;
761 1041
762 /* First page (Guest registers) is writable from the Guest */ 1042 /* First page (Guest registers) is writable from the Guest */
763 pte[i] = pfn_pte(page_to_pfn(switcher_page[i]), 1043 native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
764 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)); 1044 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
765 1045
766 /* The second page contains the "struct lguest_ro_state", and is 1046 /* The second page contains the "struct lguest_ro_state", and is
767 * read-only. */ 1047 * read-only. */
768 pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]), 1048 native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
769 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); 1049 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
770} 1050}
771 1051
772/* We've made it through the page table code. Perhaps our tired brains are 1052/* We've made it through the page table code. Perhaps our tired brains are
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 7ede64ffeef9..482ed5a18750 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
150{ 150{
151 /* We assume the Guest has the same number of GDT entries as the 151 /* We assume the Guest has the same number of GDT entries as the
152 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ 152 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
153 if (num > ARRAY_SIZE(cpu->arch.gdt)) 153 if (num >= ARRAY_SIZE(cpu->arch.gdt))
154 kill_guest(cpu, "too many gdt entries %i", num); 154 kill_guest(cpu, "too many gdt entries %i", num);
155 155
156 /* Set it up, then fix it. */ 156 /* Set it up, then fix it. */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e034..44b931504457 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
148 148
149static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
150 149
151/* 150/*
152 * Driver Callback Index's 151 * Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
159 * Forward protos... 158 * Forward protos...
160 */ 159 */
161static irqreturn_t mpt_interrupt(int irq, void *bus_id); 160static irqreturn_t mpt_interrupt(int irq, void *bus_id);
162static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); 161static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
162 MPT_FRAME_HDR *reply);
163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, 163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
164 u32 *req, int replyBytes, u16 *u16reply, int maxwait, 164 u32 *req, int replyBytes, u16 *u16reply, int maxwait,
165 int sleepFlag); 165 int sleepFlag);
@@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); 192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
193static void mpt_timer_expired(unsigned long data);
194static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); 193static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
195static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 194static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
195 int sleepFlag);
196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag); 197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init); 198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
207#endif 207#endif
208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); 208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
209 209
210//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 210static int ProcessEventNotification(MPT_ADAPTER *ioc,
211static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); 211 EventNotificationReply_t *evReply, int *evHandlers);
212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); 214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -277,6 +277,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
277} 277}
278 278
279/** 279/**
280 * mpt_is_discovery_complete - determine if discovery has completed
281 * @ioc: per adatper instance
282 *
283 * Returns 1 when discovery completed, else zero.
284 */
285static int
286mpt_is_discovery_complete(MPT_ADAPTER *ioc)
287{
288 ConfigExtendedPageHeader_t hdr;
289 CONFIGPARMS cfg;
290 SasIOUnitPage0_t *buffer;
291 dma_addr_t dma_handle;
292 int rc = 0;
293
294 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
295 memset(&cfg, 0, sizeof(CONFIGPARMS));
296 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
297 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
298 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
299 cfg.cfghdr.ehdr = &hdr;
300 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
301
302 if ((mpt_config(ioc, &cfg)))
303 goto out;
304 if (!hdr.ExtPageLength)
305 goto out;
306
307 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
308 &dma_handle);
309 if (!buffer)
310 goto out;
311
312 cfg.physAddr = dma_handle;
313 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
314
315 if ((mpt_config(ioc, &cfg)))
316 goto out_free_consistent;
317
318 if (!(buffer->PhyData[0].PortFlags &
319 MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
320 rc = 1;
321
322 out_free_consistent:
323 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
324 buffer, dma_handle);
325 out:
326 return rc;
327}
328
329/**
280 * mpt_fault_reset_work - work performed on workq after ioc fault 330 * mpt_fault_reset_work - work performed on workq after ioc fault
281 * @work: input argument, used to derive ioc 331 * @work: input argument, used to derive ioc
282 * 332 *
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
290 int rc; 340 int rc;
291 unsigned long flags; 341 unsigned long flags;
292 342
293 if (ioc->diagPending || !ioc->active) 343 if (ioc->ioc_reset_in_progress || !ioc->active)
294 goto out; 344 goto out;
295 345
296 ioc_raw_state = mpt_GetIocState(ioc, 0); 346 ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
307 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " 357 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
308 "reset (%04xh)\n", ioc->name, ioc_raw_state & 358 "reset (%04xh)\n", ioc->name, ioc_raw_state &
309 MPI_DOORBELL_DATA_MASK); 359 MPI_DOORBELL_DATA_MASK);
360 } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
361 if ((mpt_is_discovery_complete(ioc))) {
362 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
363 "discovery_quiesce_io flag\n", ioc->name));
364 ioc->sas_discovery_quiesce_io = 0;
365 }
310 } 366 }
311 367
312 out: 368 out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
317 ioc = ioc->alt_ioc; 373 ioc = ioc->alt_ioc;
318 374
319 /* rearm the timer */ 375 /* rearm the timer */
320 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 376 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
321 if (ioc->reset_work_q) 377 if (ioc->reset_work_q)
322 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, 378 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
323 msecs_to_jiffies(MPT_POLLING_INTERVAL)); 379 msecs_to_jiffies(MPT_POLLING_INTERVAL));
324 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 380 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
325} 381}
326 382
327 383
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
501 557
502/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 558/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
503/** 559/**
504 * mpt_base_reply - MPT base driver's callback routine 560 * mptbase_reply - MPT base driver's callback routine
505 * @ioc: Pointer to MPT_ADAPTER structure 561 * @ioc: Pointer to MPT_ADAPTER structure
506 * @mf: Pointer to original MPT request frame 562 * @req: Pointer to original MPT request frame
507 * @reply: Pointer to MPT reply frame (NULL if TurboReply) 563 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
508 * 564 *
509 * MPT base driver's callback routine; all base driver 565 * MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
514 * should be freed, or 0 if it shouldn't. 570 * should be freed, or 0 if it shouldn't.
515 */ 571 */
516static int 572static int
517mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) 573mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
518{ 574{
575 EventNotificationReply_t *pEventReply;
576 u8 event;
577 int evHandlers;
519 int freereq = 1; 578 int freereq = 1;
520 u8 func;
521 579
522 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name)); 580 switch (reply->u.hdr.Function) {
523#ifdef CONFIG_FUSION_LOGGING 581 case MPI_FUNCTION_EVENT_NOTIFICATION:
524 if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) && 582 pEventReply = (EventNotificationReply_t *)reply;
525 !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { 583 evHandlers = 0;
526 dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n", 584 ProcessEventNotification(ioc, pEventReply, &evHandlers);
527 ioc->name, mf)); 585 event = le32_to_cpu(pEventReply->Event) & 0xFF;
528 DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf); 586 if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
529 }
530#endif
531
532 func = reply->u.hdr.Function;
533 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
534 ioc->name, func));
535
536 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
537 EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
538 int evHandlers = 0;
539 int results;
540
541 results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
542 if (results != evHandlers) {
543 /* CHECKME! Any special handling needed here? */
544 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
545 ioc->name, evHandlers, results));
546 }
547
548 /*
549 * Hmmm... It seems that EventNotificationReply is an exception
550 * to the rule of one reply per request.
551 */
552 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
553 freereq = 0; 587 freereq = 0;
554 } else { 588 if (event != MPI_EVENT_EVENT_CHANGE)
555 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 589 break;
556 ioc->name, pEvReply)); 590 case MPI_FUNCTION_CONFIG:
557 } 591 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
558 592 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
559#ifdef CONFIG_PROC_FS 593 if (reply) {
560// LogEvent(ioc, pEvReply); 594 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
561#endif 595 memcpy(ioc->mptbase_cmds.reply, reply,
562 596 min(MPT_DEFAULT_FRAME_SIZE,
563 } else if (func == MPI_FUNCTION_EVENT_ACK) { 597 4 * reply->u.reply.MsgLength));
564 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
565 ioc->name));
566 } else if (func == MPI_FUNCTION_CONFIG) {
567 CONFIGPARMS *pCfg;
568 unsigned long flags;
569
570 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
571 ioc->name, mf, reply));
572
573 pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
574
575 if (pCfg) {
576 /* disable timer and remove from linked list */
577 del_timer(&pCfg->timer);
578
579 spin_lock_irqsave(&ioc->FreeQlock, flags);
580 list_del(&pCfg->linkage);
581 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
582
583 /*
584 * If IOC Status is SUCCESS, save the header
585 * and set the status code to GOOD.
586 */
587 pCfg->status = MPT_CONFIG_ERROR;
588 if (reply) {
589 ConfigReply_t *pReply = (ConfigReply_t *)reply;
590 u16 status;
591
592 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
593 dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n",
594 ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
595
596 pCfg->status = status;
597 if (status == MPI_IOCSTATUS_SUCCESS) {
598 if ((pReply->Header.PageType &
599 MPI_CONFIG_PAGETYPE_MASK) ==
600 MPI_CONFIG_PAGETYPE_EXTENDED) {
601 pCfg->cfghdr.ehdr->ExtPageLength =
602 le16_to_cpu(pReply->ExtPageLength);
603 pCfg->cfghdr.ehdr->ExtPageType =
604 pReply->ExtPageType;
605 }
606 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
607
608 /* If this is a regular header, save PageLength. */
609 /* LMP Do this better so not using a reserved field! */
610 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
611 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
612 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
613 }
614 }
615
616 /*
617 * Wake up the original calling thread
618 */
619 pCfg->wait_done = 1;
620 wake_up(&mpt_waitq);
621 } 598 }
622 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) { 599 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
623 /* we should be always getting a reply frame */ 600 ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
624 memcpy(ioc->persist_reply_frame, reply, 601 complete(&ioc->mptbase_cmds.done);
625 min(MPT_DEFAULT_FRAME_SIZE, 602 } else
626 4*reply->u.reply.MsgLength)); 603 freereq = 0;
627 del_timer(&ioc->persist_timer); 604 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
628 ioc->persist_wait_done = 1; 605 freereq = 1;
629 wake_up(&mpt_waitq); 606 break;
630 } else { 607 case MPI_FUNCTION_EVENT_ACK:
631 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 608 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
632 ioc->name, func); 609 "EventAck reply received\n", ioc->name));
610 break;
611 default:
612 printk(MYIOC_s_ERR_FMT
613 "Unexpected msg function (=%02Xh) reply received!\n",
614 ioc->name, reply->u.hdr.Function);
615 break;
633 } 616 }
634 617
635 /* 618 /*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
988 971
989 /* Put Request back on FreeQ! */ 972 /* Put Request back on FreeQ! */
990 spin_lock_irqsave(&ioc->FreeQlock, flags); 973 spin_lock_irqsave(&ioc->FreeQlock, flags);
991 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */ 974 if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
975 goto out;
976 /* signature to know if this mf is freed */
977 mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
992 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 978 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
993#ifdef MFCNT 979#ifdef MFCNT
994 ioc->mfcnt--; 980 ioc->mfcnt--;
995#endif 981#endif
982 out:
996 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 983 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
997} 984}
998 985
999/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1000/** 987/**
1001 * mpt_add_sge - Place a simple SGE at address pAddr. 988 * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
1002 * @pAddr: virtual address for SGE 989 * @pAddr: virtual address for SGE
1003 * @flagslength: SGE flags and data transfer length 990 * @flagslength: SGE flags and data transfer length
1004 * @dma_addr: Physical address 991 * @dma_addr: Physical address
@@ -1006,23 +993,117 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1006 * This routine places a MPT request frame back on the MPT adapter's 993 * This routine places a MPT request frame back on the MPT adapter's
1007 * FreeQ. 994 * FreeQ.
1008 */ 995 */
1009void 996static void
1010mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) 997mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1011{ 998{
1012 if (sizeof(dma_addr_t) == sizeof(u64)) { 999 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
1013 SGESimple64_t *pSge = (SGESimple64_t *) pAddr; 1000 pSge->FlagsLength = cpu_to_le32(flagslength);
1001 pSge->Address = cpu_to_le32(dma_addr);
1002}
1003
1004/**
1005 * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
1006 * @pAddr: virtual address for SGE
1007 * @flagslength: SGE flags and data transfer length
1008 * @dma_addr: Physical address
1009 *
1010 * This routine places a MPT request frame back on the MPT adapter's
1011 * FreeQ.
1012 **/
1013static void
1014mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1015{
1016 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1017 pSge->Address.Low = cpu_to_le32
1018 (lower_32_bits((unsigned long)(dma_addr)));
1019 pSge->Address.High = cpu_to_le32
1020 (upper_32_bits((unsigned long)dma_addr));
1021 pSge->FlagsLength = cpu_to_le32
1022 ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1023}
1024
1025/**
1026 * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr
1027 * (1078 workaround).
1028 * @pAddr: virtual address for SGE
1029 * @flagslength: SGE flags and data transfer length
1030 * @dma_addr: Physical address
1031 *
1032 * This routine places a MPT request frame back on the MPT adapter's
1033 * FreeQ.
1034 **/
1035static void
1036mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1037{
1038 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1039 u32 tmp;
1040
1041 pSge->Address.Low = cpu_to_le32
1042 (lower_32_bits((unsigned long)(dma_addr)));
1043 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1044
1045 /*
1046 * 1078 errata workaround for the 36GB limitation
1047 */
1048 if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
1049 flagslength |=
1050 MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
1051 tmp |= (1<<31);
1052 if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
1053 printk(KERN_DEBUG "1078 P0M2 addressing for "
1054 "addr = 0x%llx len = %d\n",
1055 (unsigned long long)dma_addr,
1056 MPI_SGE_LENGTH(flagslength));
1057 }
1058
1059 pSge->Address.High = cpu_to_le32(tmp);
1060 pSge->FlagsLength = cpu_to_le32(
1061 (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1062}
1063
1064/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1065/**
1066 * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
1067 * @pAddr: virtual address for SGE
1068 * @next: nextChainOffset value (u32's)
1069 * @length: length of next SGL segment
1070 * @dma_addr: Physical address
1071 *
1072 */
1073static void
1074mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1075{
1076 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
1077 pChain->Length = cpu_to_le16(length);
1078 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1079 pChain->NextChainOffset = next;
1080 pChain->Address = cpu_to_le32(dma_addr);
1081}
1082
1083/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1084/**
1085 * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
1086 * @pAddr: virtual address for SGE
1087 * @next: nextChainOffset value (u32's)
1088 * @length: length of next SGL segment
1089 * @dma_addr: Physical address
1090 *
1091 */
1092static void
1093mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1094{
1095 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
1014 u32 tmp = dma_addr & 0xFFFFFFFF; 1096 u32 tmp = dma_addr & 0xFFFFFFFF;
1015 1097
1016 pSge->FlagsLength = cpu_to_le32(flagslength); 1098 pChain->Length = cpu_to_le16(length);
1017 pSge->Address.Low = cpu_to_le32(tmp); 1099 pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
1018 tmp = (u32) ((u64)dma_addr >> 32); 1100 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
1019 pSge->Address.High = cpu_to_le32(tmp);
1020 1101
1021 } else { 1102 pChain->NextChainOffset = next;
1022 SGESimple32_t *pSge = (SGESimple32_t *) pAddr; 1103
1023 pSge->FlagsLength = cpu_to_le32(flagslength); 1104 pChain->Address.Low = cpu_to_le32(tmp);
1024 pSge->Address = cpu_to_le32(dma_addr); 1105 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1025 } 1106 pChain->Address.High = cpu_to_le32(tmp);
1026} 1107}
1027 1108
1028/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1109/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1225,7 +1306,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1225 } 1306 }
1226 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; 1307 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1227 flags_length |= ioc->HostPageBuffer_sz; 1308 flags_length |= ioc->HostPageBuffer_sz;
1228 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma); 1309 ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1229 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE; 1310 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1230 1311
1231return 0; 1312return 0;
@@ -1534,21 +1615,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1534 1615
1535 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1616 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1536 1617
1537 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1618 if (sizeof(dma_addr_t) > 4) {
1538 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1619 const uint64_t required_mask = dma_get_required_mask
1539 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1620 (&pdev->dev);
1540 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1621 if (required_mask > DMA_BIT_MASK(32)
1541 ioc->name)); 1622 && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1542 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 1623 && !pci_set_consistent_dma_mask(pdev,
1543 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 1624 DMA_BIT_MASK(64))) {
1544 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1625 ioc->dma_mask = DMA_BIT_MASK(64);
1545 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1626 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1546 ioc->name)); 1627 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1628 ioc->name));
1629 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1630 && !pci_set_consistent_dma_mask(pdev,
1631 DMA_BIT_MASK(32))) {
1632 ioc->dma_mask = DMA_BIT_MASK(32);
1633 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1634 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1635 ioc->name));
1636 } else {
1637 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1638 ioc->name, pci_name(pdev));
1639 return r;
1640 }
1547 } else { 1641 } else {
1548 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", 1642 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1549 ioc->name, pci_name(pdev)); 1643 && !pci_set_consistent_dma_mask(pdev,
1550 pci_release_selected_regions(pdev, ioc->bars); 1644 DMA_BIT_MASK(32))) {
1551 return r; 1645 ioc->dma_mask = DMA_BIT_MASK(32);
1646 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1647 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1648 ioc->name));
1649 } else {
1650 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1651 ioc->name, pci_name(pdev));
1652 return r;
1653 }
1552 } 1654 }
1553 1655
1554 mem_phys = msize = 0; 1656 mem_phys = msize = 0;
@@ -1632,6 +1734,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1632 1734
1633 ioc->id = mpt_ids++; 1735 ioc->id = mpt_ids++;
1634 sprintf(ioc->name, "ioc%d", ioc->id); 1736 sprintf(ioc->name, "ioc%d", ioc->id);
1737 dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1635 1738
1636 /* 1739 /*
1637 * set initial debug level 1740 * set initial debug level
@@ -1650,14 +1753,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1650 return r; 1753 return r;
1651 } 1754 }
1652 1755
1756 /*
1757 * Setting up proper handlers for scatter gather handling
1758 */
1759 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
1760 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
1761 ioc->add_sge = &mpt_add_sge_64bit_1078;
1762 else
1763 ioc->add_sge = &mpt_add_sge_64bit;
1764 ioc->add_chain = &mpt_add_chain_64bit;
1765 ioc->sg_addr_size = 8;
1766 } else {
1767 ioc->add_sge = &mpt_add_sge;
1768 ioc->add_chain = &mpt_add_chain;
1769 ioc->sg_addr_size = 4;
1770 }
1771 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
1772
1653 ioc->alloc_total = sizeof(MPT_ADAPTER); 1773 ioc->alloc_total = sizeof(MPT_ADAPTER);
1654 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ 1774 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1655 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 1775 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1656 1776
1657 ioc->pcidev = pdev; 1777 ioc->pcidev = pdev;
1658 ioc->diagPending = 0; 1778
1659 spin_lock_init(&ioc->diagLock); 1779 spin_lock_init(&ioc->taskmgmt_lock);
1660 spin_lock_init(&ioc->initializing_hba_lock); 1780 mutex_init(&ioc->internal_cmds.mutex);
1781 init_completion(&ioc->internal_cmds.done);
1782 mutex_init(&ioc->mptbase_cmds.mutex);
1783 init_completion(&ioc->mptbase_cmds.done);
1784 mutex_init(&ioc->taskmgmt_cmds.mutex);
1785 init_completion(&ioc->taskmgmt_cmds.done);
1661 1786
1662 /* Initialize the event logging. 1787 /* Initialize the event logging.
1663 */ 1788 */
@@ -1670,16 +1795,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1670 ioc->mfcnt = 0; 1795 ioc->mfcnt = 0;
1671#endif 1796#endif
1672 1797
1798 ioc->sh = NULL;
1673 ioc->cached_fw = NULL; 1799 ioc->cached_fw = NULL;
1674 1800
1675 /* Initilize SCSI Config Data structure 1801 /* Initilize SCSI Config Data structure
1676 */ 1802 */
1677 memset(&ioc->spi_data, 0, sizeof(SpiCfgData)); 1803 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1678 1804
1679 /* Initialize the running configQ head.
1680 */
1681 INIT_LIST_HEAD(&ioc->configQ);
1682
1683 /* Initialize the fc rport list head. 1805 /* Initialize the fc rport list head.
1684 */ 1806 */
1685 INIT_LIST_HEAD(&ioc->fc_rports); 1807 INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1812,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1690 1812
1691 /* Initialize workqueue */ 1813 /* Initialize workqueue */
1692 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); 1814 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1693 spin_lock_init(&ioc->fault_reset_work_lock);
1694 1815
1695 snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name), 1816 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1696 "mpt_poll_%d", ioc->id); 1817 "mpt_poll_%d", ioc->id);
1697 ioc->reset_work_q = 1818 ioc->reset_work_q =
1698 create_singlethread_workqueue(ioc->reset_work_q_name); 1819 create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1888,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1767 case MPI_MANUFACTPAGE_DEVID_SAS1064: 1888 case MPI_MANUFACTPAGE_DEVID_SAS1064:
1768 case MPI_MANUFACTPAGE_DEVID_SAS1068: 1889 case MPI_MANUFACTPAGE_DEVID_SAS1068:
1769 ioc->errata_flag_1064 = 1; 1890 ioc->errata_flag_1064 = 1;
1891 ioc->bus_type = SAS;
1892 break;
1770 1893
1771 case MPI_MANUFACTPAGE_DEVID_SAS1064E: 1894 case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1772 case MPI_MANUFACTPAGE_DEVID_SAS1068E: 1895 case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1773 case MPI_MANUFACTPAGE_DEVID_SAS1078: 1896 case MPI_MANUFACTPAGE_DEVID_SAS1078:
1774 ioc->bus_type = SAS; 1897 ioc->bus_type = SAS;
1898 break;
1775 } 1899 }
1776 1900
1777 1901
@@ -1813,6 +1937,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1813 */ 1937 */
1814 mpt_detect_bound_ports(ioc, pdev); 1938 mpt_detect_bound_ports(ioc, pdev);
1815 1939
1940 INIT_LIST_HEAD(&ioc->fw_event_list);
1941 spin_lock_init(&ioc->fw_event_lock);
1942 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
1943 ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
1944
1816 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 1945 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
1817 CAN_SLEEP)) != 0){ 1946 CAN_SLEEP)) != 0){
1818 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n", 1947 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2014,18 @@ mpt_detach(struct pci_dev *pdev)
1885 /* 2014 /*
1886 * Stop polling ioc for fault condition 2015 * Stop polling ioc for fault condition
1887 */ 2016 */
1888 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 2017 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1889 wq = ioc->reset_work_q; 2018 wq = ioc->reset_work_q;
1890 ioc->reset_work_q = NULL; 2019 ioc->reset_work_q = NULL;
1891 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 2020 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1892 cancel_delayed_work(&ioc->fault_reset_work); 2021 cancel_delayed_work(&ioc->fault_reset_work);
1893 destroy_workqueue(wq); 2022 destroy_workqueue(wq);
1894 2023
2024 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2025 wq = ioc->fw_event_q;
2026 ioc->fw_event_q = NULL;
2027 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2028 destroy_workqueue(wq);
1895 2029
1896 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); 2030 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
1897 remove_proc_entry(pname, NULL); 2031 remove_proc_entry(pname, NULL);
@@ -1994,6 +2128,21 @@ mpt_resume(struct pci_dev *pdev)
1994 if (err) 2128 if (err)
1995 return err; 2129 return err;
1996 2130
2131 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
2132 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
2133 ioc->add_sge = &mpt_add_sge_64bit_1078;
2134 else
2135 ioc->add_sge = &mpt_add_sge_64bit;
2136 ioc->add_chain = &mpt_add_chain_64bit;
2137 ioc->sg_addr_size = 8;
2138 } else {
2139
2140 ioc->add_sge = &mpt_add_sge;
2141 ioc->add_chain = &mpt_add_chain;
2142 ioc->sg_addr_size = 4;
2143 }
2144 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
2145
1997 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", 2146 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
1998 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), 2147 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
1999 CHIPREG_READ32(&ioc->chip->Doorbell)); 2148 CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2240,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2091 ioc->active = 0; 2240 ioc->active = 0;
2092 2241
2093 if (ioc->alt_ioc) { 2242 if (ioc->alt_ioc) {
2094 if (ioc->alt_ioc->active) 2243 if (ioc->alt_ioc->active ||
2244 reason == MPT_HOSTEVENT_IOC_RECOVER) {
2095 reset_alt_ioc_active = 1; 2245 reset_alt_ioc_active = 1;
2096 2246 /* Disable alt-IOC's reply interrupts
2097 /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */ 2247 * (and FreeQ) for a bit
2098 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); 2248 **/
2099 ioc->alt_ioc->active = 0; 2249 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2250 0xFFFFFFFF);
2251 ioc->alt_ioc->active = 0;
2252 }
2100 } 2253 }
2101 2254
2102 hard = 1; 2255 hard = 1;
@@ -2117,9 +2270,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2117 } 2270 }
2118 2271
2119 } else { 2272 } else {
2120 printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name); 2273 printk(MYIOC_s_WARN_FMT
2274 "NOT READY WARNING!\n", ioc->name);
2121 } 2275 }
2122 return -1; 2276 ret = -1;
2277 goto out;
2123 } 2278 }
2124 2279
2125 /* hard_reset_done = 0 if a soft reset was performed 2280 /* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2284,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2129 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) 2284 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
2130 alt_ioc_ready = 1; 2285 alt_ioc_ready = 1;
2131 else 2286 else
2132 printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name); 2287 printk(MYIOC_s_WARN_FMT
2288 ": alt-ioc Not ready WARNING!\n",
2289 ioc->alt_ioc->name);
2133 } 2290 }
2134 2291
2135 for (ii=0; ii<5; ii++) { 2292 for (ii=0; ii<5; ii++) {
@@ -2150,7 +2307,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2150 if (alt_ioc_ready) { 2307 if (alt_ioc_ready) {
2151 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { 2308 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
2152 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2309 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2153 "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); 2310 "Initial Alt IocFacts failed rc=%x\n",
2311 ioc->name, rc));
2154 /* Retry - alt IOC was initialized once 2312 /* Retry - alt IOC was initialized once
2155 */ 2313 */
2156 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); 2314 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2352,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2194 IRQF_SHARED, ioc->name, ioc); 2352 IRQF_SHARED, ioc->name, ioc);
2195 if (rc < 0) { 2353 if (rc < 0) {
2196 printk(MYIOC_s_ERR_FMT "Unable to allocate " 2354 printk(MYIOC_s_ERR_FMT "Unable to allocate "
2197 "interrupt %d!\n", ioc->name, ioc->pcidev->irq); 2355 "interrupt %d!\n",
2356 ioc->name, ioc->pcidev->irq);
2198 if (ioc->msi_enable) 2357 if (ioc->msi_enable)
2199 pci_disable_msi(ioc->pcidev); 2358 pci_disable_msi(ioc->pcidev);
2200 return -EBUSY; 2359 ret = -EBUSY;
2360 goto out;
2201 } 2361 }
2202 irq_allocated = 1; 2362 irq_allocated = 1;
2203 ioc->pci_irq = ioc->pcidev->irq; 2363 ioc->pci_irq = ioc->pcidev->irq;
2204 pci_set_master(ioc->pcidev); /* ?? */ 2364 pci_set_master(ioc->pcidev); /* ?? */
2205 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2365 pci_set_drvdata(ioc->pcidev, ioc);
2206 "%d\n", ioc->name, ioc->pcidev->irq)); 2366 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2367 "installed at interrupt %d\n", ioc->name,
2368 ioc->pcidev->irq));
2207 } 2369 }
2208 } 2370 }
2209 2371
@@ -2212,17 +2374,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2212 * init as upper addresses are needed for init. 2374 * init as upper addresses are needed for init.
2213 * If fails, continue with alt-ioc processing 2375 * If fails, continue with alt-ioc processing
2214 */ 2376 */
2377 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
2378 ioc->name));
2215 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) 2379 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
2216 ret = -3; 2380 ret = -3;
2217 2381
2218 /* May need to check/upload firmware & data here! 2382 /* May need to check/upload firmware & data here!
2219 * If fails, continue with alt-ioc processing 2383 * If fails, continue with alt-ioc processing
2220 */ 2384 */
2385 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
2386 ioc->name));
2221 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) 2387 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
2222 ret = -4; 2388 ret = -4;
2223// NEW! 2389// NEW!
2224 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { 2390 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
2225 printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n", 2391 printk(MYIOC_s_WARN_FMT
2392 ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
2226 ioc->alt_ioc->name, rc); 2393 ioc->alt_ioc->name, rc);
2227 alt_ioc_ready = 0; 2394 alt_ioc_ready = 0;
2228 reset_alt_ioc_active = 0; 2395 reset_alt_ioc_active = 0;
@@ -2232,8 +2399,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2232 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { 2399 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
2233 alt_ioc_ready = 0; 2400 alt_ioc_ready = 0;
2234 reset_alt_ioc_active = 0; 2401 reset_alt_ioc_active = 0;
2235 printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n", 2402 printk(MYIOC_s_WARN_FMT
2236 ioc->alt_ioc->name, rc); 2403 ": alt-ioc: (%d) init failure WARNING!\n",
2404 ioc->alt_ioc->name, rc);
2237 } 2405 }
2238 } 2406 }
2239 2407
@@ -2269,28 +2437,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2269 } 2437 }
2270 } 2438 }
2271 2439
2440 /* Enable MPT base driver management of EventNotification
2441 * and EventAck handling.
2442 */
2443 if ((ret == 0) && (!ioc->facts.EventState)) {
2444 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2445 "SendEventNotification\n",
2446 ioc->name));
2447 ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
2448 }
2449
2450 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2451 rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
2452
2272 if (ret == 0) { 2453 if (ret == 0) {
2273 /* Enable! (reply interrupt) */ 2454 /* Enable! (reply interrupt) */
2274 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 2455 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
2275 ioc->active = 1; 2456 ioc->active = 1;
2276 } 2457 }
2277 2458 if (rc == 0) { /* alt ioc */
2278 if (reset_alt_ioc_active && ioc->alt_ioc) { 2459 if (reset_alt_ioc_active && ioc->alt_ioc) {
2279 /* (re)Enable alt-IOC! (reply interrupt) */ 2460 /* (re)Enable alt-IOC! (reply interrupt) */
2280 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n", 2461 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
2281 ioc->alt_ioc->name)); 2462 "reply irq re-enabled\n",
2282 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); 2463 ioc->alt_ioc->name));
2283 ioc->alt_ioc->active = 1; 2464 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2465 MPI_HIM_DIM);
2466 ioc->alt_ioc->active = 1;
2467 }
2284 } 2468 }
2285 2469
2286 /* Enable MPT base driver management of EventNotification
2287 * and EventAck handling.
2288 */
2289 if ((ret == 0) && (!ioc->facts.EventState))
2290 (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */
2291
2292 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2293 (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */
2294 2470
2295 /* Add additional "reason" check before call to GetLanConfigPages 2471 /* Add additional "reason" check before call to GetLanConfigPages
2296 * (combined with GetIoUnitPage2 call). This prevents a somewhat 2472 * (combined with GetIoUnitPage2 call). This prevents a somewhat
@@ -2306,8 +2482,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2306 mutex_init(&ioc->raid_data.inactive_list_mutex); 2482 mutex_init(&ioc->raid_data.inactive_list_mutex);
2307 INIT_LIST_HEAD(&ioc->raid_data.inactive_list); 2483 INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
2308 2484
2309 if (ioc->bus_type == SAS) { 2485 switch (ioc->bus_type) {
2310 2486
2487 case SAS:
2311 /* clear persistency table */ 2488 /* clear persistency table */
2312 if(ioc->facts.IOCExceptions & 2489 if(ioc->facts.IOCExceptions &
2313 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { 2490 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2498,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2321 */ 2498 */
2322 mpt_findImVolumes(ioc); 2499 mpt_findImVolumes(ioc);
2323 2500
2324 } else if (ioc->bus_type == FC) { 2501 /* Check, and possibly reset, the coalescing value
2325 if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && 2502 */
2503 mpt_read_ioc_pg_1(ioc);
2504
2505 break;
2506
2507 case FC:
2508 if ((ioc->pfacts[0].ProtocolFlags &
2509 MPI_PORTFACTS_PROTOCOL_LAN) &&
2326 (ioc->lan_cnfg_page0.Header.PageLength == 0)) { 2510 (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
2327 /* 2511 /*
2328 * Pre-fetch the ports LAN MAC address! 2512 * Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2515,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2331 (void) GetLanConfigPages(ioc); 2515 (void) GetLanConfigPages(ioc);
2332 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 2516 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
2333 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2517 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2334 "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", 2518 "LanAddr = %02X:%02X:%02X"
2335 ioc->name, a[5], a[4], a[3], a[2], a[1], a[0])); 2519 ":%02X:%02X:%02X\n",
2336 2520 ioc->name, a[5], a[4],
2521 a[3], a[2], a[1], a[0]));
2337 } 2522 }
2338 } else { 2523 break;
2524
2525 case SPI:
2339 /* Get NVRAM and adapter maximums from SPP 0 and 2 2526 /* Get NVRAM and adapter maximums from SPP 0 and 2
2340 */ 2527 */
2341 mpt_GetScsiPortSettings(ioc, 0); 2528 mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2541,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2354 mpt_read_ioc_pg_1(ioc); 2541 mpt_read_ioc_pg_1(ioc);
2355 2542
2356 mpt_read_ioc_pg_4(ioc); 2543 mpt_read_ioc_pg_4(ioc);
2544
2545 break;
2357 } 2546 }
2358 2547
2359 GetIoUnitPage2(ioc); 2548 GetIoUnitPage2(ioc);
@@ -2435,16 +2624,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
2435 if (_pcidev == peer) { 2624 if (_pcidev == peer) {
2436 /* Paranoia checks */ 2625 /* Paranoia checks */
2437 if (ioc->alt_ioc != NULL) { 2626 if (ioc->alt_ioc != NULL) {
2438 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2627 printk(MYIOC_s_WARN_FMT
2439 ioc->name, ioc->alt_ioc->name); 2628 "Oops, already bound (%s <==> %s)!\n",
2629 ioc->name, ioc->name, ioc->alt_ioc->name);
2440 break; 2630 break;
2441 } else if (ioc_srch->alt_ioc != NULL) { 2631 } else if (ioc_srch->alt_ioc != NULL) {
2442 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2632 printk(MYIOC_s_WARN_FMT
2443 ioc_srch->name, ioc_srch->alt_ioc->name); 2633 "Oops, already bound (%s <==> %s)!\n",
2634 ioc_srch->name, ioc_srch->name,
2635 ioc_srch->alt_ioc->name);
2444 break; 2636 break;
2445 } 2637 }
2446 dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n", 2638 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2447 ioc->name, ioc_srch->name)); 2639 "FOUND! binding %s <==> %s\n",
2640 ioc->name, ioc->name, ioc_srch->name));
2448 ioc_srch->alt_ioc = ioc; 2641 ioc_srch->alt_ioc = ioc;
2449 ioc->alt_ioc = ioc_srch; 2642 ioc->alt_ioc = ioc_srch;
2450 } 2643 }
@@ -2464,8 +2657,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2464 int ret; 2657 int ret;
2465 2658
2466 if (ioc->cached_fw != NULL) { 2659 if (ioc->cached_fw != NULL) {
2467 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " 2660 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2468 "adapter\n", __func__, ioc->name)); 2661 "%s: Pushing FW onto adapter\n", __func__, ioc->name));
2469 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) 2662 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2470 ioc->cached_fw, CAN_SLEEP)) < 0) { 2663 ioc->cached_fw, CAN_SLEEP)) < 0) {
2471 printk(MYIOC_s_WARN_FMT 2664 printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2667,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2474 } 2667 }
2475 } 2668 }
2476 2669
2670 /*
2671 * Put the controller into ready state (if its not already)
2672 */
2673 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
2674 if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
2675 CAN_SLEEP)) {
2676 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
2677 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
2678 "reset failed to put ioc in ready state!\n",
2679 ioc->name, __func__);
2680 } else
2681 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
2682 "failed!\n", ioc->name, __func__);
2683 }
2684
2685
2477 /* Disable adapter interrupts! */ 2686 /* Disable adapter interrupts! */
2687 synchronize_irq(ioc->pcidev->irq);
2478 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2688 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2479 ioc->active = 0; 2689 ioc->active = 0;
2690
2480 /* Clear any lingering interrupt */ 2691 /* Clear any lingering interrupt */
2481 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2692 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2693 CHIPREG_READ32(&ioc->chip->IntStatus);
2482 2694
2483 if (ioc->alloc != NULL) { 2695 if (ioc->alloc != NULL) {
2484 sz = ioc->alloc_sz; 2696 sz = ioc->alloc_sz;
@@ -2538,19 +2750,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2538 if((ret = mpt_host_page_access_control(ioc, 2750 if((ret = mpt_host_page_access_control(ioc,
2539 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { 2751 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2540 printk(MYIOC_s_ERR_FMT 2752 printk(MYIOC_s_ERR_FMT
2541 "host page buffers free failed (%d)!\n", 2753 ": %s: host page buffers free failed (%d)!\n",
2542 ioc->name, ret); 2754 ioc->name, __func__, ret);
2543 } 2755 }
2544 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n", 2756 dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2545 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); 2757 "HostPageBuffer free @ %p, sz=%d bytes\n",
2758 ioc->name, ioc->HostPageBuffer,
2759 ioc->HostPageBuffer_sz));
2546 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, 2760 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2547 ioc->HostPageBuffer, ioc->HostPageBuffer_dma); 2761 ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
2548 ioc->HostPageBuffer = NULL; 2762 ioc->HostPageBuffer = NULL;
2549 ioc->HostPageBuffer_sz = 0; 2763 ioc->HostPageBuffer_sz = 0;
2550 ioc->alloc_total -= ioc->HostPageBuffer_sz; 2764 ioc->alloc_total -= ioc->HostPageBuffer_sz;
2551 } 2765 }
2552}
2553 2766
2767 pci_set_drvdata(ioc->pcidev, NULL);
2768}
2554/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2769/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2555/** 2770/**
2556 * mpt_adapter_dispose - Free all resources associated with an MPT adapter 2771 * mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2905,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2690 } 2905 }
2691 2906
2692 /* Is it already READY? */ 2907 /* Is it already READY? */
2693 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) 2908 if (!statefault &&
2909 ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
2910 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2911 "IOC is in READY state\n", ioc->name));
2694 return 0; 2912 return 0;
2913 }
2695 2914
2696 /* 2915 /*
2697 * Check to see if IOC is in FAULT state. 2916 * Check to see if IOC is in FAULT state.
@@ -2764,8 +2983,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2764 2983
2765 ii++; cntdn--; 2984 ii++; cntdn--;
2766 if (!cntdn) { 2985 if (!cntdn) {
2767 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 2986 printk(MYIOC_s_ERR_FMT
2768 ioc->name, (int)((ii+5)/HZ)); 2987 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
2988 ioc->name, ioc_state, (int)((ii+5)/HZ));
2769 return -ETIME; 2989 return -ETIME;
2770 } 2990 }
2771 2991
@@ -2778,9 +2998,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2778 } 2998 }
2779 2999
2780 if (statefault < 3) { 3000 if (statefault < 3) {
2781 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", 3001 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
2782 ioc->name, 3002 statefault == 1 ? "stuck handshake" : "IOC FAULT");
2783 statefault==1 ? "stuck handshake" : "IOC FAULT");
2784 } 3003 }
2785 3004
2786 return hard_reset_done; 3005 return hard_reset_done;
@@ -2833,8 +3052,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2833 3052
2834 /* IOC *must* NOT be in RESET state! */ 3053 /* IOC *must* NOT be in RESET state! */
2835 if (ioc->last_state == MPI_IOC_STATE_RESET) { 3054 if (ioc->last_state == MPI_IOC_STATE_RESET) {
2836 printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n", 3055 printk(KERN_ERR MYNAM
2837 ioc->name, ioc->last_state ); 3056 ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
3057 ioc->name, ioc->last_state);
2838 return -44; 3058 return -44;
2839 } 3059 }
2840 3060
@@ -2896,7 +3116,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2896 * Old: u16{Major(4),Minor(4),SubMinor(8)} 3116 * Old: u16{Major(4),Minor(4),SubMinor(8)}
2897 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} 3117 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
2898 */ 3118 */
2899 if (facts->MsgVersion < 0x0102) { 3119 if (facts->MsgVersion < MPI_VERSION_01_02) {
2900 /* 3120 /*
2901 * Handle old FC f/w style, convert to new... 3121 * Handle old FC f/w style, convert to new...
2902 */ 3122 */
@@ -2908,9 +3128,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2908 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); 3128 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
2909 3129
2910 facts->ProductID = le16_to_cpu(facts->ProductID); 3130 facts->ProductID = le16_to_cpu(facts->ProductID);
3131
2911 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) 3132 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2912 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) 3133 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
2913 ioc->ir_firmware = 1; 3134 ioc->ir_firmware = 1;
3135
2914 facts->CurrentHostMfaHighAddr = 3136 facts->CurrentHostMfaHighAddr =
2915 le32_to_cpu(facts->CurrentHostMfaHighAddr); 3137 le32_to_cpu(facts->CurrentHostMfaHighAddr);
2916 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); 3138 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3148,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2926 * to 14 in MPI-1.01.0x. 3148 * to 14 in MPI-1.01.0x.
2927 */ 3149 */
2928 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && 3150 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
2929 facts->MsgVersion > 0x0100) { 3151 facts->MsgVersion > MPI_VERSION_01_00) {
2930 facts->FWImageSize = le32_to_cpu(facts->FWImageSize); 3152 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
2931 } 3153 }
2932 3154
@@ -3108,6 +3330,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3108 3330
3109 ioc_init.MaxDevices = (U8)ioc->devices_per_bus; 3331 ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3110 ioc_init.MaxBuses = (U8)ioc->number_of_buses; 3332 ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3333
3111 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", 3334 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3112 ioc->name, ioc->facts.MsgVersion)); 3335 ioc->name, ioc->facts.MsgVersion));
3113 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { 3336 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3345,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3122 } 3345 }
3123 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 3346 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3124 3347
3125 if (sizeof(dma_addr_t) == sizeof(u64)) { 3348 if (ioc->sg_addr_size == sizeof(u64)) {
3126 /* Save the upper 32-bits of the request 3349 /* Save the upper 32-bits of the request
3127 * (reply) and sense buffers. 3350 * (reply) and sense buffers.
3128 */ 3351 */
@@ -3325,11 +3548,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3325 FWUpload_t *prequest; 3548 FWUpload_t *prequest;
3326 FWUploadReply_t *preply; 3549 FWUploadReply_t *preply;
3327 FWUploadTCSGE_t *ptcsge; 3550 FWUploadTCSGE_t *ptcsge;
3328 int sgeoffset;
3329 u32 flagsLength; 3551 u32 flagsLength;
3330 int ii, sz, reply_sz; 3552 int ii, sz, reply_sz;
3331 int cmdStatus; 3553 int cmdStatus;
3332 3554 int request_size;
3333 /* If the image size is 0, we are done. 3555 /* If the image size is 0, we are done.
3334 */ 3556 */
3335 if ((sz = ioc->facts.FWImageSize) == 0) 3557 if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3586,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3364 ptcsge->ImageSize = cpu_to_le32(sz); 3586 ptcsge->ImageSize = cpu_to_le32(sz);
3365 ptcsge++; 3587 ptcsge++;
3366 3588
3367 sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
3368
3369 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; 3589 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3370 mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); 3590 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3371 3591 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3372 sgeoffset += sizeof(u32) + sizeof(dma_addr_t); 3592 ioc->SGE_size;
3373 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", 3593 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3374 ioc->name, prequest, sgeoffset)); 3594 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3595 ioc->facts.FWImageSize, request_size));
3375 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); 3596 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3376 3597
3377 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, 3598 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3378 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); 3599 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3379 3600
3380 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); 3601 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3602 "rc=%x \n", ioc->name, ii));
3381 3603
3382 cmdStatus = -EFAULT; 3604 cmdStatus = -EFAULT;
3383 if (ii == 0) { 3605 if (ii == 0) {
3384 /* Handshake transfer was complete and successful. 3606 /* Handshake transfer was complete and successful.
3385 * Check the Reply Frame. 3607 * Check the Reply Frame.
3386 */ 3608 */
3387 int status, transfer_sz; 3609 int status;
3388 status = le16_to_cpu(preply->IOCStatus); 3610 status = le16_to_cpu(preply->IOCStatus) &
3389 if (status == MPI_IOCSTATUS_SUCCESS) { 3611 MPI_IOCSTATUS_MASK;
3390 transfer_sz = le32_to_cpu(preply->ActualImageSize); 3612 if (status == MPI_IOCSTATUS_SUCCESS &&
3391 if (transfer_sz == sz) 3613 ioc->facts.FWImageSize ==
3614 le32_to_cpu(preply->ActualImageSize))
3392 cmdStatus = 0; 3615 cmdStatus = 0;
3393 }
3394 } 3616 }
3395 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", 3617 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
3396 ioc->name, cmdStatus)); 3618 ioc->name, cmdStatus));
3397 3619
3398 3620
3399 if (cmdStatus) { 3621 if (cmdStatus) {
3400 3622 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
3401 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", 3623 "freeing image \n", ioc->name));
3402 ioc->name));
3403 mpt_free_fw_memory(ioc); 3624 mpt_free_fw_memory(ioc);
3404 } 3625 }
3405 kfree(prequest); 3626 kfree(prequest);
@@ -3723,6 +3944,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3723 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3944 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3724 3945
3725 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3946 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3947
3948 if (!ignore)
3949 return 0;
3950
3726 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3951 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3727 "address=%p\n", ioc->name, __func__, 3952 "address=%p\n", ioc->name, __func__,
3728 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3953 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3965,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3740 "looking for READY STATE: doorbell=%x" 3965 "looking for READY STATE: doorbell=%x"
3741 " count=%d\n", 3966 " count=%d\n",
3742 ioc->name, doorbell, count)); 3967 ioc->name, doorbell, count));
3968
3743 if (doorbell == MPI_IOC_STATE_READY) { 3969 if (doorbell == MPI_IOC_STATE_READY) {
3744 return 1; 3970 return 1;
3745 } 3971 }
@@ -3890,6 +4116,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3890 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4116 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3891 doorbell &= MPI_IOC_STATE_MASK; 4117 doorbell &= MPI_IOC_STATE_MASK;
3892 4118
4119 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4120 "looking for READY STATE: doorbell=%x"
4121 " count=%d\n", ioc->name, doorbell, count));
4122
3893 if (doorbell == MPI_IOC_STATE_READY) { 4123 if (doorbell == MPI_IOC_STATE_READY) {
3894 break; 4124 break;
3895 } 4125 }
@@ -3901,6 +4131,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3901 mdelay (1000); 4131 mdelay (1000);
3902 } 4132 }
3903 } 4133 }
4134
4135 if (doorbell != MPI_IOC_STATE_READY)
4136 printk(MYIOC_s_ERR_FMT "Failed to come READY "
4137 "after reset! IocState=%x", ioc->name,
4138 doorbell);
3904 } 4139 }
3905 } 4140 }
3906 4141
@@ -4019,8 +4254,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
4019 if (sleepFlag != CAN_SLEEP) 4254 if (sleepFlag != CAN_SLEEP)
4020 count *= 10; 4255 count *= 10;
4021 4256
4022 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 4257 printk(MYIOC_s_ERR_FMT
4023 ioc->name, (int)((count+5)/HZ)); 4258 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
4259 ioc->name, state, (int)((count+5)/HZ));
4024 return -ETIME; 4260 return -ETIME;
4025 } 4261 }
4026 4262
@@ -4090,24 +4326,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
4090 * num_sge = num sge in request frame + last chain buffer 4326 * num_sge = num sge in request frame + last chain buffer
4091 * scale = num sge per chain buffer if no chain element 4327 * scale = num sge per chain buffer if no chain element
4092 */ 4328 */
4093 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4329 scale = ioc->req_sz / ioc->SGE_size;
4094 if (sizeof(dma_addr_t) == sizeof(u64)) 4330 if (ioc->sg_addr_size == sizeof(u64))
4095 num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4331 num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
4096 else 4332 else
4097 num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4333 num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
4098 4334
4099 if (sizeof(dma_addr_t) == sizeof(u64)) { 4335 if (ioc->sg_addr_size == sizeof(u64)) {
4100 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4336 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
4101 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4337 (ioc->req_sz - 60) / ioc->SGE_size;
4102 } else { 4338 } else {
4103 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4339 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
4104 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4340 scale + (ioc->req_sz - 64) / ioc->SGE_size;
4105 } 4341 }
4106 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", 4342 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
4107 ioc->name, num_sge, numSGE)); 4343 ioc->name, num_sge, numSGE));
4108 4344
4109 if ( numSGE > MPT_SCSI_SG_DEPTH ) 4345 if (ioc->bus_type == FC) {
4110 numSGE = MPT_SCSI_SG_DEPTH; 4346 if (numSGE > MPT_SCSI_FC_SG_DEPTH)
4347 numSGE = MPT_SCSI_FC_SG_DEPTH;
4348 } else {
4349 if (numSGE > MPT_SCSI_SG_DEPTH)
4350 numSGE = MPT_SCSI_SG_DEPTH;
4351 }
4111 4352
4112 num_chain = 1; 4353 num_chain = 1;
4113 while (numSGE - num_sge > 0) { 4354 while (numSGE - num_sge > 0) {
@@ -4161,12 +4402,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4161 dma_addr_t alloc_dma; 4402 dma_addr_t alloc_dma;
4162 u8 *mem; 4403 u8 *mem;
4163 int i, reply_sz, sz, total_size, num_chain; 4404 int i, reply_sz, sz, total_size, num_chain;
4405 u64 dma_mask;
4406
4407 dma_mask = 0;
4164 4408
4165 /* Prime reply FIFO... */ 4409 /* Prime reply FIFO... */
4166 4410
4167 if (ioc->reply_frames == NULL) { 4411 if (ioc->reply_frames == NULL) {
4168 if ( (num_chain = initChainBuffers(ioc)) < 0) 4412 if ( (num_chain = initChainBuffers(ioc)) < 0)
4169 return -1; 4413 return -1;
4414 /*
4415 * 1078 errata workaround for the 36GB limitation
4416 */
4417 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4418 ioc->dma_mask > DMA_35BIT_MASK) {
4419 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4420 && !pci_set_consistent_dma_mask(ioc->pcidev,
4421 DMA_BIT_MASK(32))) {
4422 dma_mask = DMA_35BIT_MASK;
4423 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4424 "setting 35 bit addressing for "
4425 "Request/Reply/Chain and Sense Buffers\n",
4426 ioc->name));
4427 } else {
4428 /*Reseting DMA mask to 64 bit*/
4429 pci_set_dma_mask(ioc->pcidev,
4430 DMA_BIT_MASK(64));
4431 pci_set_consistent_dma_mask(ioc->pcidev,
4432 DMA_BIT_MASK(64));
4433
4434 printk(MYIOC_s_ERR_FMT
4435 "failed setting 35 bit addressing for "
4436 "Request/Reply/Chain and Sense Buffers\n",
4437 ioc->name);
4438 return -1;
4439 }
4440 }
4170 4441
4171 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); 4442 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
4172 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", 4443 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
@@ -4305,9 +4576,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4305 alloc_dma += ioc->reply_sz; 4576 alloc_dma += ioc->reply_sz;
4306 } 4577 }
4307 4578
4579 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4580 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4581 ioc->dma_mask))
4582 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4583 "restoring 64 bit addressing\n", ioc->name));
4584
4308 return 0; 4585 return 0;
4309 4586
4310out_fail: 4587out_fail:
4588
4311 if (ioc->alloc != NULL) { 4589 if (ioc->alloc != NULL) {
4312 sz = ioc->alloc_sz; 4590 sz = ioc->alloc_sz;
4313 pci_free_consistent(ioc->pcidev, 4591 pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4602,13 @@ out_fail:
4324 ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 4602 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
4325 ioc->sense_buf_pool = NULL; 4603 ioc->sense_buf_pool = NULL;
4326 } 4604 }
4605
4606 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4607 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4608 DMA_BIT_MASK(64)))
4609 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4610 "restoring 64 bit addressing\n", ioc->name));
4611
4327 return -1; 4612 return -1;
4328} 4613}
4329 4614
@@ -4759,7 +5044,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4759 SasIoUnitControlReply_t *sasIoUnitCntrReply; 5044 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4760 MPT_FRAME_HDR *mf = NULL; 5045 MPT_FRAME_HDR *mf = NULL;
4761 MPIHeader_t *mpi_hdr; 5046 MPIHeader_t *mpi_hdr;
5047 int ret = 0;
5048 unsigned long timeleft;
5049
5050 mutex_lock(&ioc->mptbase_cmds.mutex);
4762 5051
5052 /* init the internal cmd struct */
5053 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
5054 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
4763 5055
4764 /* insure garbage is not sent to fw */ 5056 /* insure garbage is not sent to fw */
4765 switch(persist_opcode) { 5057 switch(persist_opcode) {
@@ -4769,17 +5061,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4769 break; 5061 break;
4770 5062
4771 default: 5063 default:
4772 return -1; 5064 ret = -1;
4773 break; 5065 goto out;
4774 } 5066 }
4775 5067
4776 printk("%s: persist_opcode=%x\n",__func__, persist_opcode); 5068 printk(KERN_DEBUG "%s: persist_opcode=%x\n",
5069 __func__, persist_opcode);
4777 5070
4778 /* Get a MF for this command. 5071 /* Get a MF for this command.
4779 */ 5072 */
4780 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5073 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4781 printk("%s: no msg frames!\n",__func__); 5074 printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
4782 return -1; 5075 ret = -1;
5076 goto out;
4783 } 5077 }
4784 5078
4785 mpi_hdr = (MPIHeader_t *) mf; 5079 mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5083,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4789 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; 5083 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4790 sasIoUnitCntrReq->Operation = persist_opcode; 5084 sasIoUnitCntrReq->Operation = persist_opcode;
4791 5085
4792 init_timer(&ioc->persist_timer);
4793 ioc->persist_timer.data = (unsigned long) ioc;
4794 ioc->persist_timer.function = mpt_timer_expired;
4795 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4796 ioc->persist_wait_done=0;
4797 add_timer(&ioc->persist_timer);
4798 mpt_put_msg_frame(mpt_base_index, ioc, mf); 5086 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4799 wait_event(mpt_waitq, ioc->persist_wait_done); 5087 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
5088 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
5089 ret = -ETIME;
5090 printk(KERN_DEBUG "%s: failed\n", __func__);
5091 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5092 goto out;
5093 if (!timeleft) {
5094 printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
5095 ioc->name, __func__);
5096 mpt_HardResetHandler(ioc, CAN_SLEEP);
5097 mpt_free_msg_frame(ioc, mf);
5098 }
5099 goto out;
5100 }
5101
5102 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
5103 ret = -1;
5104 goto out;
5105 }
4800 5106
4801 sasIoUnitCntrReply = 5107 sasIoUnitCntrReply =
4802 (SasIoUnitControlReply_t *)ioc->persist_reply_frame; 5108 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
4803 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 5109 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4804 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 5110 printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4805 __func__, 5111 __func__, sasIoUnitCntrReply->IOCStatus,
4806 sasIoUnitCntrReply->IOCStatus,
4807 sasIoUnitCntrReply->IOCLogInfo); 5112 sasIoUnitCntrReply->IOCLogInfo);
4808 return -1; 5113 printk(KERN_DEBUG "%s: failed\n", __func__);
4809 } 5114 ret = -1;
5115 } else
5116 printk(KERN_DEBUG "%s: success\n", __func__);
5117 out:
4810 5118
4811 printk("%s: success\n",__func__); 5119 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
4812 return 0; 5120 mutex_unlock(&ioc->mptbase_cmds.mutex);
5121 return ret;
4813} 5122}
4814 5123
4815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5124/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5394,17 +5703,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
5394 * -ENOMEM if pci_alloc failed 5703 * -ENOMEM if pci_alloc failed
5395 **/ 5704 **/
5396int 5705int
5397mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) 5706mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
5707 RaidPhysDiskPage0_t *phys_disk)
5398{ 5708{
5399 CONFIGPARMS cfg; 5709 CONFIGPARMS cfg;
5400 ConfigPageHeader_t hdr; 5710 ConfigPageHeader_t hdr;
5401 dma_addr_t dma_handle; 5711 dma_addr_t dma_handle;
5402 pRaidPhysDiskPage0_t buffer = NULL; 5712 pRaidPhysDiskPage0_t buffer = NULL;
5403 int rc; 5713 int rc;
5404 5714
5405 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5715 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5406 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5716 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5717 memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
5407 5718
5719 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
5408 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5720 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5409 cfg.cfghdr.hdr = &hdr; 5721 cfg.cfghdr.hdr = &hdr;
5410 cfg.physAddr = -1; 5722 cfg.physAddr = -1;
@@ -5451,6 +5763,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
5451} 5763}
5452 5764
5453/** 5765/**
5766 * mpt_raid_phys_disk_get_num_paths - returns number paths associated to this phys_num
5767 * @ioc: Pointer to a Adapter Structure
5768 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5769 *
5770 * Return:
5771 * returns number paths
5772 **/
5773int
5774mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
5775{
5776 CONFIGPARMS cfg;
5777 ConfigPageHeader_t hdr;
5778 dma_addr_t dma_handle;
5779 pRaidPhysDiskPage1_t buffer = NULL;
5780 int rc;
5781
5782 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5783 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5784
5785 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5786 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5787 hdr.PageNumber = 1;
5788 cfg.cfghdr.hdr = &hdr;
5789 cfg.physAddr = -1;
5790 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5791
5792 if (mpt_config(ioc, &cfg) != 0) {
5793 rc = 0;
5794 goto out;
5795 }
5796
5797 if (!hdr.PageLength) {
5798 rc = 0;
5799 goto out;
5800 }
5801
5802 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5803 &dma_handle);
5804
5805 if (!buffer) {
5806 rc = 0;
5807 goto out;
5808 }
5809
5810 cfg.physAddr = dma_handle;
5811 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5812 cfg.pageAddr = phys_disk_num;
5813
5814 if (mpt_config(ioc, &cfg) != 0) {
5815 rc = 0;
5816 goto out;
5817 }
5818
5819 rc = buffer->NumPhysDiskPaths;
5820 out:
5821
5822 if (buffer)
5823 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5824 dma_handle);
5825
5826 return rc;
5827}
5828EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
5829
5830/**
5831 * mpt_raid_phys_disk_pg1 - returns phys disk page 1
5832 * @ioc: Pointer to a Adapter Structure
5833 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5834 * @phys_disk: requested payload data returned
5835 *
5836 * Return:
5837 * 0 on success
5838 * -EFAULT if read of config page header fails or data pointer not NULL
5839 * -ENOMEM if pci_alloc failed
5840 **/
5841int
5842mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
5843 RaidPhysDiskPage1_t *phys_disk)
5844{
5845 CONFIGPARMS cfg;
5846 ConfigPageHeader_t hdr;
5847 dma_addr_t dma_handle;
5848 pRaidPhysDiskPage1_t buffer = NULL;
5849 int rc;
5850 int i;
5851 __le64 sas_address;
5852
5853 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5854 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5855 rc = 0;
5856
5857 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5858 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5859 hdr.PageNumber = 1;
5860 cfg.cfghdr.hdr = &hdr;
5861 cfg.physAddr = -1;
5862 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5863
5864 if (mpt_config(ioc, &cfg) != 0) {
5865 rc = -EFAULT;
5866 goto out;
5867 }
5868
5869 if (!hdr.PageLength) {
5870 rc = -EFAULT;
5871 goto out;
5872 }
5873
5874 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5875 &dma_handle);
5876
5877 if (!buffer) {
5878 rc = -ENOMEM;
5879 goto out;
5880 }
5881
5882 cfg.physAddr = dma_handle;
5883 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5884 cfg.pageAddr = phys_disk_num;
5885
5886 if (mpt_config(ioc, &cfg) != 0) {
5887 rc = -EFAULT;
5888 goto out;
5889 }
5890
5891 phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
5892 phys_disk->PhysDiskNum = phys_disk_num;
5893 for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
5894 phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
5895 phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
5896 phys_disk->Path[i].OwnerIdentifier =
5897 buffer->Path[i].OwnerIdentifier;
5898 phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
5899 memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
5900 sas_address = le64_to_cpu(sas_address);
5901 memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
5902 memcpy(&sas_address,
5903 &buffer->Path[i].OwnerWWID, sizeof(__le64));
5904 sas_address = le64_to_cpu(sas_address);
5905 memcpy(&phys_disk->Path[i].OwnerWWID,
5906 &sas_address, sizeof(__le64));
5907 }
5908
5909 out:
5910
5911 if (buffer)
5912 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5913 dma_handle);
5914
5915 return rc;
5916}
5917EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
5918
5919
5920/**
5454 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5921 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
5455 * @ioc: Pointer to a Adapter Strucutre 5922 * @ioc: Pointer to a Adapter Strucutre
5456 * 5923 *
@@ -5775,30 +6242,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
5775 * SendEventNotification - Send EventNotification (on or off) request to adapter 6242 * SendEventNotification - Send EventNotification (on or off) request to adapter
5776 * @ioc: Pointer to MPT_ADAPTER structure 6243 * @ioc: Pointer to MPT_ADAPTER structure
5777 * @EvSwitch: Event switch flags 6244 * @EvSwitch: Event switch flags
6245 * @sleepFlag: Specifies whether the process can sleep
5778 */ 6246 */
5779static int 6247static int
5780SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) 6248SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
5781{ 6249{
5782 EventNotification_t *evnp; 6250 EventNotification_t evn;
6251 MPIDefaultReply_t reply_buf;
5783 6252
5784 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); 6253 memset(&evn, 0, sizeof(EventNotification_t));
5785 if (evnp == NULL) { 6254 memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
5786 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
5787 ioc->name));
5788 return 0;
5789 }
5790 memset(evnp, 0, sizeof(*evnp));
5791
5792 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
5793 6255
5794 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 6256 evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
5795 evnp->ChainOffset = 0; 6257 evn.Switch = EvSwitch;
5796 evnp->MsgFlags = 0; 6258 evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
5797 evnp->Switch = EvSwitch;
5798 6259
5799 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); 6260 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6261 "Sending EventNotification (%d) request %p\n",
6262 ioc->name, EvSwitch, &evn));
5800 6263
5801 return 0; 6264 return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
6265 (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
6266 sleepFlag);
5802} 6267}
5803 6268
5804/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6269/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5814,7 +6279,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5814 6279
5815 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6280 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5816 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 6281 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5817 ioc->name,__func__)); 6282 ioc->name, __func__));
5818 return -1; 6283 return -1;
5819 } 6284 }
5820 6285
@@ -5851,12 +6316,19 @@ int
5851mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 6316mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5852{ 6317{
5853 Config_t *pReq; 6318 Config_t *pReq;
6319 ConfigReply_t *pReply;
5854 ConfigExtendedPageHeader_t *pExtHdr = NULL; 6320 ConfigExtendedPageHeader_t *pExtHdr = NULL;
5855 MPT_FRAME_HDR *mf; 6321 MPT_FRAME_HDR *mf;
5856 unsigned long flags; 6322 int ii;
5857 int ii, rc;
5858 int flagsLength; 6323 int flagsLength;
5859 int in_isr; 6324 long timeout;
6325 int ret;
6326 u8 page_type = 0, extend_page;
6327 unsigned long timeleft;
6328 unsigned long flags;
6329 int in_isr;
6330 u8 issue_hard_reset = 0;
6331 u8 retry_count = 0;
5860 6332
5861 /* Prevent calling wait_event() (below), if caller happens 6333 /* Prevent calling wait_event() (below), if caller happens
5862 * to be in ISR context, because that is fatal! 6334 * to be in ISR context, because that is fatal!
@@ -5866,15 +6338,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5866 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", 6338 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
5867 ioc->name)); 6339 ioc->name));
5868 return -EPERM; 6340 return -EPERM;
6341 }
6342
6343 /* don't send a config page during diag reset */
6344 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6345 if (ioc->ioc_reset_in_progress) {
6346 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6347 "%s: busy with host reset\n", ioc->name, __func__));
6348 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6349 return -EBUSY;
6350 }
6351 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6352
6353 /* don't send if no chance of success */
6354 if (!ioc->active ||
6355 mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
6356 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6357 "%s: ioc not operational, %d, %xh\n",
6358 ioc->name, __func__, ioc->active,
6359 mpt_GetIocState(ioc, 0)));
6360 return -EFAULT;
5869 } 6361 }
5870 6362
6363 retry_config:
6364 mutex_lock(&ioc->mptbase_cmds.mutex);
6365 /* init the internal cmd struct */
6366 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
6367 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
6368
5871 /* Get and Populate a free Frame 6369 /* Get and Populate a free Frame
5872 */ 6370 */
5873 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6371 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5874 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", 6372 dcprintk(ioc, printk(MYIOC_s_WARN_FMT
5875 ioc->name)); 6373 "mpt_config: no msg frames!\n", ioc->name));
5876 return -EAGAIN; 6374 ret = -EAGAIN;
6375 goto out;
5877 } 6376 }
6377
5878 pReq = (Config_t *)mf; 6378 pReq = (Config_t *)mf;
5879 pReq->Action = pCfg->action; 6379 pReq->Action = pCfg->action;
5880 pReq->Reserved = 0; 6380 pReq->Reserved = 0;
@@ -5900,7 +6400,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5900 pReq->ExtPageType = pExtHdr->ExtPageType; 6400 pReq->ExtPageType = pExtHdr->ExtPageType;
5901 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 6401 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
5902 6402
5903 /* Page Length must be treated as a reserved field for the extended header. */ 6403 /* Page Length must be treated as a reserved field for the
6404 * extended header.
6405 */
5904 pReq->Header.PageLength = 0; 6406 pReq->Header.PageLength = 0;
5905 } 6407 }
5906 6408
@@ -5913,78 +6415,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5913 else 6415 else
5914 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 6416 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
5915 6417
5916 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { 6418 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
6419 MPI_CONFIG_PAGETYPE_EXTENDED) {
5917 flagsLength |= pExtHdr->ExtPageLength * 4; 6420 flagsLength |= pExtHdr->ExtPageLength * 4;
5918 6421 page_type = pReq->ExtPageType;
5919 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6422 extend_page = 1;
5920 ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); 6423 } else {
5921 }
5922 else {
5923 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; 6424 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
5924 6425 page_type = pReq->Header.PageType;
5925 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6426 extend_page = 0;
5926 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
5927 } 6427 }
5928 6428
5929 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 6429 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5930 6430 "Sending Config request type 0x%x, page 0x%x and action %d\n",
5931 /* Append pCfg pointer to end of mf 6431 ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
5932 */
5933 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5934
5935 /* Initalize the timer
5936 */
5937 init_timer_on_stack(&pCfg->timer);
5938 pCfg->timer.data = (unsigned long) ioc;
5939 pCfg->timer.function = mpt_timer_expired;
5940 pCfg->wait_done = 0;
5941
5942 /* Set the timer; ensure 10 second minimum */
5943 if (pCfg->timeout < 10)
5944 pCfg->timer.expires = jiffies + HZ*10;
5945 else
5946 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5947
5948 /* Add to end of Q, set timer and then issue this command */
5949 spin_lock_irqsave(&ioc->FreeQlock, flags);
5950 list_add_tail(&pCfg->linkage, &ioc->configQ);
5951 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5952 6432
5953 add_timer(&pCfg->timer); 6433 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
6434 timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
5954 mpt_put_msg_frame(mpt_base_index, ioc, mf); 6435 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5955 wait_event(mpt_waitq, pCfg->wait_done); 6436 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
6437 timeout);
6438 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
6439 ret = -ETIME;
6440 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6441 "Failed Sending Config request type 0x%x, page 0x%x,"
6442 " action %d, status %xh, time left %ld\n\n",
6443 ioc->name, page_type, pReq->Header.PageNumber,
6444 pReq->Action, ioc->mptbase_cmds.status, timeleft));
6445 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
6446 goto out;
6447 if (!timeleft)
6448 issue_hard_reset = 1;
6449 goto out;
6450 }
5956 6451
5957 /* mf has been freed - do not access */ 6452 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
6453 ret = -1;
6454 goto out;
6455 }
6456 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
6457 ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
6458 if (ret == MPI_IOCSTATUS_SUCCESS) {
6459 if (extend_page) {
6460 pCfg->cfghdr.ehdr->ExtPageLength =
6461 le16_to_cpu(pReply->ExtPageLength);
6462 pCfg->cfghdr.ehdr->ExtPageType =
6463 pReply->ExtPageType;
6464 }
6465 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
6466 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
6467 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
6468 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
5958 6469
5959 rc = pCfg->status; 6470 }
5960 6471
5961 return rc; 6472 if (retry_count)
5962} 6473 printk(MYIOC_s_INFO_FMT "Retry completed "
6474 "ret=0x%x timeleft=%ld\n",
6475 ioc->name, ret, timeleft);
5963 6476
5964/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6477 dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
5965/** 6478 ret, le32_to_cpu(pReply->IOCLogInfo)));
5966 * mpt_timer_expired - Callback for timer process.
5967 * Used only internal config functionality.
5968 * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
5969 */
5970static void
5971mpt_timer_expired(unsigned long data)
5972{
5973 MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
5974
5975 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
5976 6479
5977 /* Perform a FW reload */ 6480out:
5978 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
5979 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
5980 6481
5981 /* No more processing. 6482 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
5982 * Hard reset clean-up will wake up 6483 mutex_unlock(&ioc->mptbase_cmds.mutex);
5983 * process and free all resources. 6484 if (issue_hard_reset) {
5984 */ 6485 issue_hard_reset = 0;
5985 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); 6486 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
6487 ioc->name, __func__);
6488 mpt_HardResetHandler(ioc, CAN_SLEEP);
6489 mpt_free_msg_frame(ioc, mf);
6490 /* attempt one retry for a timed out command */
6491 if (!retry_count) {
6492 printk(MYIOC_s_INFO_FMT
6493 "Attempting Retry Config request"
6494 " type 0x%x, page 0x%x,"
6495 " action %d\n", ioc->name, page_type,
6496 pCfg->cfghdr.hdr->PageNumber, pCfg->action);
6497 retry_count++;
6498 goto retry_config;
6499 }
6500 }
6501 return ret;
5986 6502
5987 return;
5988} 6503}
5989 6504
5990/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6505/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,41 +6513,34 @@ mpt_timer_expired(unsigned long data)
5998static int 6513static int
5999mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 6514mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
6000{ 6515{
6001 CONFIGPARMS *pCfg; 6516 switch (reset_phase) {
6002 unsigned long flags; 6517 case MPT_IOC_SETUP_RESET:
6003 6518 ioc->taskmgmt_quiesce_io = 1;
6004 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6519 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6005 ": IOC %s_reset routed to MPT base driver!\n", 6520 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
6006 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 6521 break;
6007 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); 6522 case MPT_IOC_PRE_RESET:
6008 6523 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6009 if (reset_phase == MPT_IOC_SETUP_RESET) { 6524 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
6010 ; 6525 break;
6011 } else if (reset_phase == MPT_IOC_PRE_RESET) { 6526 case MPT_IOC_POST_RESET:
6012 /* If the internal config Q is not empty - 6527 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6013 * delete timer. MF resources will be freed when 6528 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
6014 * the FIFO's are primed. 6529/* wake up mptbase_cmds */
6015 */ 6530 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
6016 spin_lock_irqsave(&ioc->FreeQlock, flags); 6531 ioc->mptbase_cmds.status |=
6017 list_for_each_entry(pCfg, &ioc->configQ, linkage) 6532 MPT_MGMT_STATUS_DID_IOCRESET;
6018 del_timer(&pCfg->timer); 6533 complete(&ioc->mptbase_cmds.done);
6019 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
6020
6021 } else {
6022 CONFIGPARMS *pNext;
6023
6024 /* Search the configQ for internal commands.
6025 * Flush the Q, and wake up all suspended threads.
6026 */
6027 spin_lock_irqsave(&ioc->FreeQlock, flags);
6028 list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
6029 list_del(&pCfg->linkage);
6030
6031 pCfg->status = MPT_CONFIG_ERROR;
6032 pCfg->wait_done = 1;
6033 wake_up(&mpt_waitq);
6034 } 6534 }
6035 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 6535/* wake up taskmgmt_cmds */
6536 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
6537 ioc->taskmgmt_cmds.status |=
6538 MPT_MGMT_STATUS_DID_IOCRESET;
6539 complete(&ioc->taskmgmt_cmds.done);
6540 }
6541 break;
6542 default:
6543 break;
6036 } 6544 }
6037 6545
6038 return 1; /* currently means nothing really */ 6546 return 1; /* currently means nothing really */
@@ -6344,6 +6852,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6344 6852
6345 *size = y; 6853 *size = y;
6346} 6854}
6855/**
6856 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task managment
6857 * @ioc: Pointer to MPT_ADAPTER structure
6858 *
6859 * Returns 0 for SUCCESS or -1 if FAILED.
6860 *
6861 * If -1 is return, then it was not possible to set the flags
6862 **/
6863int
6864mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6865{
6866 unsigned long flags;
6867 int retval;
6868
6869 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6870 if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
6871 (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
6872 retval = -1;
6873 goto out;
6874 }
6875 retval = 0;
6876 ioc->taskmgmt_in_progress = 1;
6877 ioc->taskmgmt_quiesce_io = 1;
6878 if (ioc->alt_ioc) {
6879 ioc->alt_ioc->taskmgmt_in_progress = 1;
6880 ioc->alt_ioc->taskmgmt_quiesce_io = 1;
6881 }
6882 out:
6883 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6884 return retval;
6885}
6886EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
6887
6888/**
6889 * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task managment
6890 * @ioc: Pointer to MPT_ADAPTER structure
6891 *
6892 **/
6893void
6894mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6895{
6896 unsigned long flags;
6897
6898 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6899 ioc->taskmgmt_in_progress = 0;
6900 ioc->taskmgmt_quiesce_io = 0;
6901 if (ioc->alt_ioc) {
6902 ioc->alt_ioc->taskmgmt_in_progress = 0;
6903 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
6904 }
6905 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6906}
6907EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
6347 6908
6348 6909
6349/** 6910/**
@@ -6397,7 +6958,9 @@ int
6397mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 6958mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6398{ 6959{
6399 int rc; 6960 int rc;
6961 u8 cb_idx;
6400 unsigned long flags; 6962 unsigned long flags;
6963 unsigned long time_count;
6401 6964
6402 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); 6965 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
6403#ifdef MFCNT 6966#ifdef MFCNT
@@ -6410,14 +6973,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6410 /* Reset the adapter. Prevent more than 1 call to 6973 /* Reset the adapter. Prevent more than 1 call to
6411 * mpt_do_ioc_recovery at any instant in time. 6974 * mpt_do_ioc_recovery at any instant in time.
6412 */ 6975 */
6413 spin_lock_irqsave(&ioc->diagLock, flags); 6976 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6414 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ 6977 if (ioc->ioc_reset_in_progress) {
6415 spin_unlock_irqrestore(&ioc->diagLock, flags); 6978 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6416 return 0; 6979 return 0;
6417 } else {
6418 ioc->diagPending = 1;
6419 } 6980 }
6420 spin_unlock_irqrestore(&ioc->diagLock, flags); 6981 ioc->ioc_reset_in_progress = 1;
6982 if (ioc->alt_ioc)
6983 ioc->alt_ioc->ioc_reset_in_progress = 1;
6984 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6421 6985
6422 /* FIXME: If do_ioc_recovery fails, repeat.... 6986 /* FIXME: If do_ioc_recovery fails, repeat....
6423 */ 6987 */
@@ -6427,47 +6991,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6427 * Prevents timeouts occurring during a diagnostic reset...very bad. 6991 * Prevents timeouts occurring during a diagnostic reset...very bad.
6428 * For all other protocol drivers, this is a no-op. 6992 * For all other protocol drivers, this is a no-op.
6429 */ 6993 */
6430 { 6994 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6431 u8 cb_idx; 6995 if (MptResetHandlers[cb_idx]) {
6432 int r = 0; 6996 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6433 6997 if (ioc->alt_ioc)
6434 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6998 mpt_signal_reset(cb_idx, ioc->alt_ioc,
6435 if (MptResetHandlers[cb_idx]) { 6999 MPT_IOC_SETUP_RESET);
6436 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
6437 ioc->name, cb_idx));
6438 r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6439 if (ioc->alt_ioc) {
6440 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
6441 ioc->name, ioc->alt_ioc->name, cb_idx));
6442 r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
6443 }
6444 }
6445 } 7000 }
6446 } 7001 }
6447 7002
6448 if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { 7003 time_count = jiffies;
6449 printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); 7004 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
7005 if (rc != 0) {
7006 printk(KERN_WARNING MYNAM
7007 ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
7008 } else {
7009 if (ioc->hard_resets < -1)
7010 ioc->hard_resets++;
6450 } 7011 }
6451 ioc->reload_fw = 0;
6452 if (ioc->alt_ioc)
6453 ioc->alt_ioc->reload_fw = 0;
6454 7012
6455 spin_lock_irqsave(&ioc->diagLock, flags); 7013 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6456 ioc->diagPending = 0; 7014 ioc->ioc_reset_in_progress = 0;
6457 if (ioc->alt_ioc) 7015 ioc->taskmgmt_quiesce_io = 0;
6458 ioc->alt_ioc->diagPending = 0; 7016 ioc->taskmgmt_in_progress = 0;
6459 spin_unlock_irqrestore(&ioc->diagLock, flags); 7017 if (ioc->alt_ioc) {
7018 ioc->alt_ioc->ioc_reset_in_progress = 0;
7019 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
7020 ioc->alt_ioc->taskmgmt_in_progress = 0;
7021 }
7022 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6460 7023
6461 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); 7024 dtmprintk(ioc,
7025 printk(MYIOC_s_DEBUG_FMT
7026 "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
7027 jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
7028 "SUCCESS" : "FAILED")));
6462 7029
6463 return rc; 7030 return rc;
6464} 7031}
6465 7032
6466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7033#ifdef CONFIG_FUSION_LOGGING
6467static void 7034static void
6468EventDescriptionStr(u8 event, u32 evData0, char *evStr) 7035mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
6469{ 7036{
6470 char *ds = NULL; 7037 char *ds = NULL;
7038 u32 evData0;
7039 int ii;
7040 u8 event;
7041 char *evStr = ioc->evStr;
7042
7043 event = le32_to_cpu(pEventReply->Event) & 0xFF;
7044 evData0 = le32_to_cpu(pEventReply->Data[0]);
6471 7045
6472 switch(event) { 7046 switch(event) {
6473 case MPI_EVENT_NONE: 7047 case MPI_EVENT_NONE:
@@ -6501,9 +7075,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6501 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 7075 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
6502 ds = "Loop State(LIP) Change"; 7076 ds = "Loop State(LIP) Change";
6503 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 7077 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
6504 ds = "Loop State(LPE) Change"; /* ??? */ 7078 ds = "Loop State(LPE) Change";
6505 else 7079 else
6506 ds = "Loop State(LPB) Change"; /* ??? */ 7080 ds = "Loop State(LPB) Change";
6507 break; 7081 break;
6508 case MPI_EVENT_LOGOUT: 7082 case MPI_EVENT_LOGOUT:
6509 ds = "Logout"; 7083 ds = "Logout";
@@ -6703,28 +7277,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6703 } 7277 }
6704 case MPI_EVENT_IR2: 7278 case MPI_EVENT_IR2:
6705 { 7279 {
7280 u8 id = (u8)(evData0);
7281 u8 channel = (u8)(evData0 >> 8);
7282 u8 phys_num = (u8)(evData0 >> 24);
6706 u8 ReasonCode = (u8)(evData0 >> 16); 7283 u8 ReasonCode = (u8)(evData0 >> 16);
7284
6707 switch (ReasonCode) { 7285 switch (ReasonCode) {
6708 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: 7286 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
6709 ds = "IR2: LD State Changed"; 7287 snprintf(evStr, EVENT_DESCR_STR_SZ,
7288 "IR2: LD State Changed: "
7289 "id=%d channel=%d phys_num=%d",
7290 id, channel, phys_num);
6710 break; 7291 break;
6711 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: 7292 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
6712 ds = "IR2: PD State Changed"; 7293 snprintf(evStr, EVENT_DESCR_STR_SZ,
7294 "IR2: PD State Changed "
7295 "id=%d channel=%d phys_num=%d",
7296 id, channel, phys_num);
6713 break; 7297 break;
6714 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: 7298 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
6715 ds = "IR2: Bad Block Table Full"; 7299 snprintf(evStr, EVENT_DESCR_STR_SZ,
7300 "IR2: Bad Block Table Full: "
7301 "id=%d channel=%d phys_num=%d",
7302 id, channel, phys_num);
6716 break; 7303 break;
6717 case MPI_EVENT_IR2_RC_PD_INSERTED: 7304 case MPI_EVENT_IR2_RC_PD_INSERTED:
6718 ds = "IR2: PD Inserted"; 7305 snprintf(evStr, EVENT_DESCR_STR_SZ,
7306 "IR2: PD Inserted: "
7307 "id=%d channel=%d phys_num=%d",
7308 id, channel, phys_num);
6719 break; 7309 break;
6720 case MPI_EVENT_IR2_RC_PD_REMOVED: 7310 case MPI_EVENT_IR2_RC_PD_REMOVED:
6721 ds = "IR2: PD Removed"; 7311 snprintf(evStr, EVENT_DESCR_STR_SZ,
7312 "IR2: PD Removed: "
7313 "id=%d channel=%d phys_num=%d",
7314 id, channel, phys_num);
6722 break; 7315 break;
6723 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: 7316 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
6724 ds = "IR2: Foreign CFG Detected"; 7317 snprintf(evStr, EVENT_DESCR_STR_SZ,
7318 "IR2: Foreign CFG Detected: "
7319 "id=%d channel=%d phys_num=%d",
7320 id, channel, phys_num);
6725 break; 7321 break;
6726 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: 7322 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
6727 ds = "IR2: Rebuild Medium Error"; 7323 snprintf(evStr, EVENT_DESCR_STR_SZ,
7324 "IR2: Rebuild Medium Error: "
7325 "id=%d channel=%d phys_num=%d",
7326 id, channel, phys_num);
7327 break;
7328 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
7329 snprintf(evStr, EVENT_DESCR_STR_SZ,
7330 "IR2: Dual Port Added: "
7331 "id=%d channel=%d phys_num=%d",
7332 id, channel, phys_num);
7333 break;
7334 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
7335 snprintf(evStr, EVENT_DESCR_STR_SZ,
7336 "IR2: Dual Port Removed: "
7337 "id=%d channel=%d phys_num=%d",
7338 id, channel, phys_num);
6728 break; 7339 break;
6729 default: 7340 default:
6730 ds = "IR2"; 7341 ds = "IR2";
@@ -6760,13 +7371,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6760 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 7371 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
6761 { 7372 {
6762 u8 reason = (u8)(evData0); 7373 u8 reason = (u8)(evData0);
6763 u8 port_num = (u8)(evData0 >> 8);
6764 u16 handle = le16_to_cpu(evData0 >> 16);
6765 7374
6766 snprintf(evStr, EVENT_DESCR_STR_SZ, 7375 switch (reason) {
6767 "SAS Initiator Device Status Change: reason=0x%02x " 7376 case MPI_EVENT_SAS_INIT_RC_ADDED:
6768 "port=%d handle=0x%04x", 7377 ds = "SAS Initiator Status Change: Added";
6769 reason, port_num, handle); 7378 break;
7379 case MPI_EVENT_SAS_INIT_RC_REMOVED:
7380 ds = "SAS Initiator Status Change: Deleted";
7381 break;
7382 default:
7383 ds = "SAS Initiator Status Change";
7384 break;
7385 }
6770 break; 7386 break;
6771 } 7387 }
6772 7388
@@ -6814,6 +7430,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6814 break; 7430 break;
6815 } 7431 }
6816 7432
7433 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
7434 {
7435 u8 reason = (u8)(evData0);
7436
7437 switch (reason) {
7438 case MPI_EVENT_SAS_EXP_RC_ADDED:
7439 ds = "Expander Status Change: Added";
7440 break;
7441 case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
7442 ds = "Expander Status Change: Deleted";
7443 break;
7444 default:
7445 ds = "Expander Status Change";
7446 break;
7447 }
7448 break;
7449 }
7450
6817 /* 7451 /*
6818 * MPT base "custom" events may be added here... 7452 * MPT base "custom" events may be added here...
6819 */ 7453 */
@@ -6823,8 +7457,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6823 } 7457 }
6824 if (ds) 7458 if (ds)
6825 strncpy(evStr, ds, EVENT_DESCR_STR_SZ); 7459 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
6826}
6827 7460
7461
7462 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
7463 "MPT event:(%02Xh) : %s\n",
7464 ioc->name, event, evStr));
7465
7466 devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
7467 ": Event data:\n"));
7468 for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
7469 devtverboseprintk(ioc, printk(" %08x",
7470 le32_to_cpu(pEventReply->Data[ii])));
7471 devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
7472}
7473#endif
6828/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7474/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6829/** 7475/**
6830 * ProcessEventNotification - Route EventNotificationReply to all event handlers 7476 * ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7487,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6841{ 7487{
6842 u16 evDataLen; 7488 u16 evDataLen;
6843 u32 evData0 = 0; 7489 u32 evData0 = 0;
6844// u32 evCtx;
6845 int ii; 7490 int ii;
6846 u8 cb_idx; 7491 u8 cb_idx;
6847 int r = 0; 7492 int r = 0;
6848 int handlers = 0; 7493 int handlers = 0;
6849 char evStr[EVENT_DESCR_STR_SZ];
6850 u8 event; 7494 u8 event;
6851 7495
6852 /* 7496 /*
6853 * Do platform normalization of values 7497 * Do platform normalization of values
6854 */ 7498 */
6855 event = le32_to_cpu(pEventReply->Event) & 0xFF; 7499 event = le32_to_cpu(pEventReply->Event) & 0xFF;
6856// evCtx = le32_to_cpu(pEventReply->EventContext);
6857 evDataLen = le16_to_cpu(pEventReply->EventDataLength); 7500 evDataLen = le16_to_cpu(pEventReply->EventDataLength);
6858 if (evDataLen) { 7501 if (evDataLen) {
6859 evData0 = le32_to_cpu(pEventReply->Data[0]); 7502 evData0 = le32_to_cpu(pEventReply->Data[0]);
6860 } 7503 }
6861 7504
6862 EventDescriptionStr(event, evData0, evStr);
6863 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
6864 ioc->name,
6865 event,
6866 evStr));
6867
6868#ifdef CONFIG_FUSION_LOGGING 7505#ifdef CONFIG_FUSION_LOGGING
6869 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7506 if (evDataLen)
6870 ": Event data:\n", ioc->name)); 7507 mpt_display_event_info(ioc, pEventReply);
6871 for (ii = 0; ii < evDataLen; ii++)
6872 devtverboseprintk(ioc, printk(" %08x",
6873 le32_to_cpu(pEventReply->Data[ii])));
6874 devtverboseprintk(ioc, printk("\n"));
6875#endif 7508#endif
6876 7509
6877 /* 7510 /*
@@ -6926,8 +7559,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6926 */ 7559 */
6927 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7560 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6928 if (MptEvHandlers[cb_idx]) { 7561 if (MptEvHandlers[cb_idx]) {
6929 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", 7562 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6930 ioc->name, cb_idx)); 7563 "Routing Event to event handler #%d\n",
7564 ioc->name, cb_idx));
6931 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); 7565 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
6932 handlers++; 7566 handlers++;
6933 } 7567 }
@@ -7011,8 +7645,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
7011 switch (info) { 7645 switch (info) {
7012 case 0x00010000: 7646 case 0x00010000:
7013 desc = "bug! MID not found"; 7647 desc = "bug! MID not found";
7014 if (ioc->reload_fw == 0)
7015 ioc->reload_fw++;
7016 break; 7648 break;
7017 7649
7018 case 0x00020000: 7650 case 0x00020000:
@@ -7613,7 +8245,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
7613EXPORT_SYMBOL(mpt_put_msg_frame); 8245EXPORT_SYMBOL(mpt_put_msg_frame);
7614EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); 8246EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
7615EXPORT_SYMBOL(mpt_free_msg_frame); 8247EXPORT_SYMBOL(mpt_free_msg_frame);
7616EXPORT_SYMBOL(mpt_add_sge);
7617EXPORT_SYMBOL(mpt_send_handshake_request); 8248EXPORT_SYMBOL(mpt_send_handshake_request);
7618EXPORT_SYMBOL(mpt_verify_adapter); 8249EXPORT_SYMBOL(mpt_verify_adapter);
7619EXPORT_SYMBOL(mpt_GetIocState); 8250EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8281,7 @@ fusion_init(void)
7650 /* Register ourselves (mptbase) in order to facilitate 8281 /* Register ourselves (mptbase) in order to facilitate
7651 * EventNotification handling. 8282 * EventNotification handling.
7652 */ 8283 */
7653 mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); 8284 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
7654 8285
7655 /* Register for hard reset handling callbacks. 8286 /* Register for hard reset handling callbacks.
7656 */ 8287 */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b3e981d2a506..1c8514dc31ca 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.07" 79#define MPT_LINUX_VERSION_COMMON "3.04.10"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
104#endif 104#endif
105 105
106#define MPT_NAME_LENGTH 32 106#define MPT_NAME_LENGTH 32
107#define MPT_KOBJ_NAME_LEN 20
107 108
108#define MPT_PROCFS_MPTBASEDIR "mpt" 109#define MPT_PROCFS_MPTBASEDIR "mpt"
109 /* chg it to "driver/fusion" ? */ 110 /* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
134 135
135#define MPT_COALESCING_TIMEOUT 0x10 136#define MPT_COALESCING_TIMEOUT 0x10
136 137
138
137/* 139/*
138 * SCSI transfer rate defines. 140 * SCSI transfer rate defines.
139 */ 141 */
@@ -161,10 +163,10 @@
161/* 163/*
162 * Set the MAX_SGE value based on user input. 164 * Set the MAX_SGE value based on user input.
163 */ 165 */
164#ifdef CONFIG_FUSION_MAX_SGE 166#ifdef CONFIG_FUSION_MAX_SGE
165#if CONFIG_FUSION_MAX_SGE < 16 167#if CONFIG_FUSION_MAX_SGE < 16
166#define MPT_SCSI_SG_DEPTH 16 168#define MPT_SCSI_SG_DEPTH 16
167#elif CONFIG_FUSION_MAX_SGE > 128 169#elif CONFIG_FUSION_MAX_SGE > 128
168#define MPT_SCSI_SG_DEPTH 128 170#define MPT_SCSI_SG_DEPTH 128
169#else 171#else
170#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE 172#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
173#define MPT_SCSI_SG_DEPTH 40 175#define MPT_SCSI_SG_DEPTH 40
174#endif 176#endif
175 177
178#ifdef CONFIG_FUSION_MAX_FC_SGE
179#if CONFIG_FUSION_MAX_FC_SGE < 16
180#define MPT_SCSI_FC_SG_DEPTH 16
181#elif CONFIG_FUSION_MAX_FC_SGE > 256
182#define MPT_SCSI_FC_SG_DEPTH 256
183#else
184#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
185#endif
186#else
187#define MPT_SCSI_FC_SG_DEPTH 40
188#endif
189
176/* debug print string length used for events and iocstatus */ 190/* debug print string length used for events and iocstatus */
177# define EVENT_DESCR_STR_SZ 100 191# define EVENT_DESCR_STR_SZ 100
178 192
@@ -431,38 +445,36 @@ do { \
431 * IOCTL structure and associated defines 445 * IOCTL structure and associated defines
432 */ 446 */
433 447
434#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
435#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
436#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
437#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
438#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
439#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
440#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
441
442#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ 448#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
443 449
444typedef struct _MPT_IOCTL { 450#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
445 struct _MPT_ADAPTER *ioc; 451#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
446 u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 452#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
447 u8 sense[MPT_SENSE_BUFFER_ALLOC]; 453#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
448 int wait_done; /* wake-up value for this ioc */ 454 on the current*/
449 u8 rsvd; 455#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
450 u8 status; /* current command status */ 456#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
451 u8 reset; /* 1 if bus reset allowed */ 457#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
452 u8 id; /* target for reset */ 458 complete routine */
453 struct mutex ioctl_mutex; 459
454} MPT_IOCTL; 460#define INITIALIZE_MGMT_STATUS(status) \
455 461 status = MPT_MGMT_STATUS_PENDING;
456#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ 462#define CLEAR_MGMT_STATUS(status) \
457#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ 463 status = 0;
458#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ 464#define CLEAR_MGMT_PENDING_STATUS(status) \
459 465 status &= ~MPT_MGMT_STATUS_PENDING;
460typedef struct _MPT_SAS_MGMT { 466#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
467 msg_context = value;
468
469typedef struct _MPT_MGMT {
461 struct mutex mutex; 470 struct mutex mutex;
462 struct completion done; 471 struct completion done;
463 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 472 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
473 u8 sense[MPT_SENSE_BUFFER_ALLOC];
464 u8 status; /* current command status */ 474 u8 status; /* current command status */
465}MPT_SAS_MGMT; 475 int completion_code;
476 u32 msg_context;
477} MPT_MGMT;
466 478
467/* 479/*
468 * Event Structure and define 480 * Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
564 u8 flags; 576 u8 flags;
565}; 577};
566 578
579typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
580typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
581 dma_addr_t dma_addr);
582
567/* 583/*
568 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 584 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
569 */ 585 */
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
573 int pci_irq; /* This irq */ 589 int pci_irq; /* This irq */
574 char name[MPT_NAME_LENGTH]; /* "iocN" */ 590 char name[MPT_NAME_LENGTH]; /* "iocN" */
575 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ 591 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
592#ifdef CONFIG_FUSION_LOGGING
593 /* used in mpt_display_event_info */
594 char evStr[EVENT_DESCR_STR_SZ];
595#endif
576 char board_name[16]; 596 char board_name[16];
577 char board_assembly[16]; 597 char board_assembly[16];
578 char board_tracer[16]; 598 char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
600 int reply_depth; /* Num Allocated reply frames */ 620 int reply_depth; /* Num Allocated reply frames */
601 int reply_sz; /* Reply frame size */ 621 int reply_sz; /* Reply frame size */
602 int num_chain; /* Number of chain buffers */ 622 int num_chain; /* Number of chain buffers */
623 MPT_ADD_SGE add_sge; /* Pointer to add_sge
624 function */
625 MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
626 function */
603 /* Pool of buffers for chaining. ReqToChain 627 /* Pool of buffers for chaining. ReqToChain
604 * and ChainToChain track index of chain buffers. 628 * and ChainToChain track index of chain buffers.
605 * ChainBuffer (DMA) virt/phys addresses. 629 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
640 RaidCfgData raid_data; /* Raid config. data */ 664 RaidCfgData raid_data; /* Raid config. data */
641 SasCfgData sas_data; /* Sas config. data */ 665 SasCfgData sas_data; /* Sas config. data */
642 FcCfgData fc_data; /* Fc config. data */ 666 FcCfgData fc_data; /* Fc config. data */
643 MPT_IOCTL *ioctl; /* ioctl data pointer */
644 struct proc_dir_entry *ioc_dentry; 667 struct proc_dir_entry *ioc_dentry;
645 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 668 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
646 spinlock_t diagLock; /* diagnostic reset lock */
647 int diagPending;
648 u32 biosVersion; /* BIOS version from IO Unit Page 2 */ 669 u32 biosVersion; /* BIOS version from IO Unit Page 2 */
649 int eventTypes; /* Event logging parameters */ 670 int eventTypes; /* Event logging parameters */
650 int eventContext; /* Next event context */ 671 int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
652 struct _mpt_ioctl_events *events; /* pointer to event log */ 673 struct _mpt_ioctl_events *events; /* pointer to event log */
653 u8 *cached_fw; /* Pointer to FW */ 674 u8 *cached_fw; /* Pointer to FW */
654 dma_addr_t cached_fw_dma; 675 dma_addr_t cached_fw_dma;
655 struct list_head configQ; /* linked list of config. requests */
656 int hs_reply_idx; 676 int hs_reply_idx;
657#ifndef MFCNT 677#ifndef MFCNT
658 u32 pad0; 678 u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
665 IOCFactsReply_t facts; 685 IOCFactsReply_t facts;
666 PortFactsReply_t pfacts[2]; 686 PortFactsReply_t pfacts[2];
667 FCPortPage0_t fc_port_page0[2]; 687 FCPortPage0_t fc_port_page0[2];
668 struct timer_list persist_timer; /* persist table timer */
669 int persist_wait_done; /* persist completion flag */
670 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
671 LANPage0_t lan_cnfg_page0; 688 LANPage0_t lan_cnfg_page0;
672 LANPage1_t lan_cnfg_page1; 689 LANPage1_t lan_cnfg_page1;
673 690
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
682 int aen_event_read_flag; /* flag to indicate event log was read*/ 699 int aen_event_read_flag; /* flag to indicate event log was read*/
683 u8 FirstWhoInit; 700 u8 FirstWhoInit;
684 u8 upload_fw; /* If set, do a fw upload */ 701 u8 upload_fw; /* If set, do a fw upload */
685 u8 reload_fw; /* Force a FW Reload on next reset */
686 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 702 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
687 u8 pad1[4]; 703 u8 pad1[4];
688 u8 DoneCtx; 704 u8 DoneCtx;
689 u8 TaskCtx; 705 u8 TaskCtx;
690 u8 InternalCtx; 706 u8 InternalCtx;
691 spinlock_t initializing_hba_lock;
692 int initializing_hba_lock_flag;
693 struct list_head list; 707 struct list_head list;
694 struct net_device *netdev; 708 struct net_device *netdev;
695 struct list_head sas_topology; 709 struct list_head sas_topology;
696 struct mutex sas_topology_mutex; 710 struct mutex sas_topology_mutex;
711
712 struct workqueue_struct *fw_event_q;
713 struct list_head fw_event_list;
714 spinlock_t fw_event_lock;
715 u8 fw_events_off; /* if '1', then ignore events */
716 char fw_event_q_name[MPT_KOBJ_NAME_LEN];
717
697 struct mutex sas_discovery_mutex; 718 struct mutex sas_discovery_mutex;
698 u8 sas_discovery_runtime; 719 u8 sas_discovery_runtime;
699 u8 sas_discovery_ignore_events; 720 u8 sas_discovery_ignore_events;
721
722 /* port_info object for the host */
723 struct mptsas_portinfo *hba_port_info;
724 u64 hba_port_sas_addr;
725 u16 hba_port_num_phy;
726 struct list_head sas_device_info_list;
727 struct mutex sas_device_info_mutex;
728 u8 old_sas_discovery_protocal;
729 u8 sas_discovery_quiesce_io;
700 int sas_index; /* index refrencing */ 730 int sas_index; /* index refrencing */
701 MPT_SAS_MGMT sas_mgmt; 731 MPT_MGMT sas_mgmt;
732 MPT_MGMT mptbase_cmds; /* for sending config pages */
733 MPT_MGMT internal_cmds;
734 MPT_MGMT taskmgmt_cmds;
735 MPT_MGMT ioctl_cmds;
736 spinlock_t taskmgmt_lock; /* diagnostic reset lock */
737 int taskmgmt_in_progress;
738 u8 taskmgmt_quiesce_io;
739 u8 ioc_reset_in_progress;
702 struct work_struct sas_persist_task; 740 struct work_struct sas_persist_task;
703 741
704 struct work_struct fc_setup_reset_work; 742 struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
707 u8 fc_link_speed[2]; 745 u8 fc_link_speed[2];
708 spinlock_t fc_rescan_work_lock; 746 spinlock_t fc_rescan_work_lock;
709 struct work_struct fc_rescan_work; 747 struct work_struct fc_rescan_work;
710 char fc_rescan_work_q_name[20]; 748 char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
711 struct workqueue_struct *fc_rescan_work_q; 749 struct workqueue_struct *fc_rescan_work_q;
750
751 /* driver forced bus resets count */
752 unsigned long hard_resets;
753 /* fw/external bus resets count */
754 unsigned long soft_resets;
755 /* cmd timeouts */
756 unsigned long timeouts;
757
712 struct scsi_cmnd **ScsiLookup; 758 struct scsi_cmnd **ScsiLookup;
713 spinlock_t scsi_lookup_lock; 759 spinlock_t scsi_lookup_lock;
714 760 u64 dma_mask;
715 char reset_work_q_name[20]; 761 u32 broadcast_aen_busy;
762 char reset_work_q_name[MPT_KOBJ_NAME_LEN];
716 struct workqueue_struct *reset_work_q; 763 struct workqueue_struct *reset_work_q;
717 struct delayed_work fault_reset_work; 764 struct delayed_work fault_reset_work;
718 spinlock_t fault_reset_work_lock; 765
766 u8 sg_addr_size;
767 u8 in_rescan;
768 u8 SGE_size;
719 769
720} MPT_ADAPTER; 770} MPT_ADAPTER;
721 771
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
753 dma_addr_t Address; 803 dma_addr_t Address;
754} MptSge_t; 804} MptSge_t;
755 805
756#define mpt_addr_size() \
757 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
758 MPI_SGE_FLAGS_32_BIT_ADDRESSING)
759 806
760#define mpt_msg_flags() \ 807#define mpt_msg_flags(ioc) \
761 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ 808 (ioc->sg_addr_size == sizeof(u64)) ? \
762 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) 809 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
810 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
811
812#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
813 (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
763 814
764/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
765/* 816/*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
835 /* Pool of memory for holding SCpnts before doing 886 /* Pool of memory for holding SCpnts before doing
836 * OS callbacks. freeQ is the free pool. 887 * OS callbacks. freeQ is the free pool.
837 */ 888 */
838 u8 tmPending;
839 u8 resetPending;
840 u8 negoNvram; /* DV disabled, nego NVRAM */ 889 u8 negoNvram; /* DV disabled, nego NVRAM */
841 u8 pad1; 890 u8 pad1;
842 u8 tmState;
843 u8 rsvd[2]; 891 u8 rsvd[2];
844 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ 892 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
845 struct scsi_cmnd *abortSCpnt; 893 struct scsi_cmnd *abortSCpnt;
846 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ 894 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
847 unsigned long hard_resets; /* driver forced bus resets count */
848 unsigned long soft_resets; /* fw/external bus resets count */
849 unsigned long timeouts; /* cmd timeouts */
850 ushort sel_timeout[MPT_MAX_FC_DEVICES]; 895 ushort sel_timeout[MPT_MAX_FC_DEVICES];
851 char *info_kbuf; 896 char *info_kbuf;
852 wait_queue_head_t scandv_waitq;
853 int scandv_wait_done;
854 long last_queue_full; 897 long last_queue_full;
855 u16 tm_iocstatus; 898 u16 tm_iocstatus;
856 u16 spi_pending; 899 u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
870 * Generic structure passed to the base mpt_config function. 913 * Generic structure passed to the base mpt_config function.
871 */ 914 */
872typedef struct _x_config_parms { 915typedef struct _x_config_parms {
873 struct list_head linkage; /* linked list */
874 struct timer_list timer; /* timer function for this request */
875 union { 916 union {
876 ConfigExtendedPageHeader_t *ehdr; 917 ConfigExtendedPageHeader_t *ehdr;
877 ConfigPageHeader_t *hdr; 918 ConfigPageHeader_t *hdr;
878 } cfghdr; 919 } cfghdr;
879 dma_addr_t physAddr; 920 dma_addr_t physAddr;
880 int wait_done; /* wait for this request */
881 u32 pageAddr; /* properly formatted */ 921 u32 pageAddr; /* properly formatted */
922 u16 status;
882 u8 action; 923 u8 action;
883 u8 dir; 924 u8 dir;
884 u8 timeout; /* seconds */ 925 u8 timeout; /* seconds */
885 u8 pad1;
886 u16 status;
887 u16 pad2;
888} CONFIGPARMS; 926} CONFIGPARMS;
889 927
890/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 928/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
909extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 947extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
910extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 948extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
911extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 949extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
912extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
913 950
914extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); 951extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
915extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); 952extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
922extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 959extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 960extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); 961extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
962extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
963 pRaidPhysDiskPage1_t phys_disk);
964extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
965 u8 phys_disk_num);
966extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
967extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
925extern void mpt_halt_firmware(MPT_ADAPTER *ioc); 968extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
926 969
927 970
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
959#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) 1002#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
960#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) 1003#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
961#define MPT_SGE_FLAGS_DIRECTION (0x04000000) 1004#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
962#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
963#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) 1005#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
964 1006
965#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) 1007#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
972 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1014 MPT_SGE_FLAGS_END_OF_BUFFER | \
973 MPT_SGE_FLAGS_END_OF_LIST | \ 1015 MPT_SGE_FLAGS_END_OF_LIST | \
974 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1016 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
975 MPT_SGE_FLAGS_ADDRESSING | \
976 MPT_TRANSFER_IOC_TO_HOST) 1017 MPT_TRANSFER_IOC_TO_HOST)
977#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ 1018#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
978 (MPT_SGE_FLAGS_LAST_ELEMENT | \ 1019 (MPT_SGE_FLAGS_LAST_ELEMENT | \
979 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1020 MPT_SGE_FLAGS_END_OF_BUFFER | \
980 MPT_SGE_FLAGS_END_OF_LIST | \ 1021 MPT_SGE_FLAGS_END_OF_LIST | \
981 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1022 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
982 MPT_SGE_FLAGS_ADDRESSING | \
983 MPT_TRANSFER_HOST_TO_IOC) 1023 MPT_TRANSFER_HOST_TO_IOC)
984 1024
985/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1025/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c63817117c0a..9b2e2198aee9 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
85 85
86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; 86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
87static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
87 88
88static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 89static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
89 90
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
127 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); 128 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
128static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, 129static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
129 struct buflist *buflist, MPT_ADAPTER *ioc); 130 struct buflist *buflist, MPT_ADAPTER *ioc);
130static void mptctl_timeout_expired (MPT_IOCTL *ioctl); 131static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
131static int mptctl_bus_reset(MPT_IOCTL *ioctl);
132static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
133static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
134 132
135/* 133/*
136 * Reset Handler cleanup function 134 * Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
183 int rc = 0; 181 int rc = 0;
184 182
185 if (nonblock) { 183 if (nonblock) {
186 if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) 184 if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
187 rc = -EAGAIN; 185 rc = -EAGAIN;
188 } else { 186 } else {
189 if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) 187 if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
190 rc = -ERESTARTSYS; 188 rc = -ERESTARTSYS;
191 } 189 }
192 return rc; 190 return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
202static int 200static int
203mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) 201mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
204{ 202{
205 char *sense_data; 203 char *sense_data;
206 int sz, req_index; 204 int req_index;
207 u16 iocStatus; 205 int sz;
208 u8 cmd;
209 206
210 if (req) 207 if (!req)
211 cmd = req->u.hdr.Function; 208 return 0;
212 else
213 return 1;
214 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
215 "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply));
216
217 if (ioc->ioctl) {
218
219 if (reply==NULL) {
220
221 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
222 "Function=%x!\n", ioc->name, cmd));
223 209
224 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; 210 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
225 ioc->ioctl->reset &= ~MPTCTL_RESET_OK; 211 "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
212 req, reply));
226 213
227 /* We are done, issue wake up 214 /*
228 */ 215 * Handling continuation of the same reply. Processing the first
229 ioc->ioctl->wait_done = 1; 216 * reply, and eating the other replys that come later.
230 wake_up (&mptctl_wait); 217 */
231 return 1; 218 if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
219 goto out_continuation;
232 220
233 } 221 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
234 222
235 /* Copy the reply frame (which much exist 223 if (!reply)
236 * for non-SCSI I/O) to the IOC structure. 224 goto out;
237 */
238 memcpy(ioc->ioctl->ReplyFrame, reply,
239 min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
240 ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
241 225
242 /* Set the command status to GOOD if IOC Status is GOOD 226 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
243 * OR if SCSI I/O cmd and data underrun or recovered error. 227 sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
244 */ 228 memcpy(ioc->ioctl_cmds.reply, reply, sz);
245 iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
246 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248
249 if (iocStatus || reply->u.reply.IOCLogInfo)
250 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
251 "loginfo (0x%08X)\n", ioc->name,
252 iocStatus,
253 le32_to_cpu(reply->u.reply.IOCLogInfo)));
254
255 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
256 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
257
258 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
259 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
260 "\tscsi_status (0x%02x), scsi_state (0x%02x), "
261 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
262 reply->u.sreply.SCSIStatus,
263 reply->u.sreply.SCSIState,
264 le16_to_cpu(reply->u.sreply.TaskTag),
265 le32_to_cpu(reply->u.sreply.TransferCount)));
266
267 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
268
269 if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
270 (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
271 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
272 }
273 }
274 229
275 /* Copy the sense data - if present 230 if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
276 */ 231 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
277 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && 232 "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
278 (reply->u.sreply.SCSIState & 233 le16_to_cpu(reply->u.reply.IOCStatus),
279 MPI_SCSI_STATE_AUTOSENSE_VALID)){ 234 le32_to_cpu(reply->u.reply.IOCLogInfo)));
235
236 if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
237 (req->u.hdr.Function ==
238 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
239
240 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
241 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
242 "scsi_status (0x%02x), scsi_state (0x%02x), "
243 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
244 reply->u.sreply.SCSIStatus,
245 reply->u.sreply.SCSIState,
246 le16_to_cpu(reply->u.sreply.TaskTag),
247 le32_to_cpu(reply->u.sreply.TransferCount)));
248
249 if (reply->u.sreply.SCSIState &
250 MPI_SCSI_STATE_AUTOSENSE_VALID) {
280 sz = req->u.scsireq.SenseBufferLength; 251 sz = req->u.scsireq.SenseBufferLength;
281 req_index = 252 req_index =
282 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); 253 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
283 sense_data = 254 sense_data = ((u8 *)ioc->sense_buf_pool +
284 ((u8 *)ioc->sense_buf_pool +
285 (req_index * MPT_SENSE_BUFFER_ALLOC)); 255 (req_index * MPT_SENSE_BUFFER_ALLOC));
286 memcpy(ioc->ioctl->sense, sense_data, sz); 256 memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
287 ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; 257 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
288 } 258 }
259 }
289 260
290 if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) 261 out:
291 mptctl_free_tm_flags(ioc); 262 /* We are done, issue wake up
292 263 */
293 /* We are done, issue wake up 264 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
294 */ 265 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
295 ioc->ioctl->wait_done = 1; 266 mpt_clear_taskmgmt_in_progress_flag(ioc);
296 wake_up (&mptctl_wait); 267 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
268 complete(&ioc->ioctl_cmds.done);
297 } 269 }
270
271 out_continuation:
272 if (reply && (reply->u.reply.MsgFlags &
273 MPI_MSGFLAGS_CONTINUATION_REPLY))
274 return 0;
298 return 1; 275 return 1;
299} 276}
300 277
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
304 * Expecting an interrupt, however timed out. 281 * Expecting an interrupt, however timed out.
305 * 282 *
306 */ 283 */
307static void mptctl_timeout_expired (MPT_IOCTL *ioctl) 284static void
285mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
308{ 286{
309 int rc = 1; 287 unsigned long flags;
310 288
311 if (ioctl == NULL) 289 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
312 return; 290 ioc->name, __func__));
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
316 291
317 ioctl->wait_done = 0; 292 if (mpt_fwfault_debug)
318 if (ioctl->reset & MPTCTL_RESET_OK) 293 mpt_halt_firmware(ioc);
319 rc = mptctl_bus_reset(ioctl);
320 294
321 if (rc) { 295 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
322 /* Issue a reset for this device. 296 if (ioc->ioc_reset_in_progress) {
323 * The IOC is not responding. 297 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
324 */ 298 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
325 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 299 mpt_free_msg_frame(ioc, mf);
326 ioctl->ioc->name)); 300 return;
327 mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
328 } 301 }
329 return; 302 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
303
330 304
305 if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
306 return;
307
308 /* Issue a reset for this device.
309 * The IOC is not responding.
310 */
311 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
312 ioc->name));
313 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
314 mpt_HardResetHandler(ioc, CAN_SLEEP);
315 mpt_free_msg_frame(ioc, mf);
316}
317
318static int
319mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
320{
321 if (!mf)
322 return 0;
323
324 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
325 "TaskMgmt completed (mf=%p, mr=%p)\n",
326 ioc->name, mf, mr));
327
328 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
329
330 if (!mr)
331 goto out;
332
333 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
334 memcpy(ioc->taskmgmt_cmds.reply, mr,
335 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
336 out:
337 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
338 mpt_clear_taskmgmt_in_progress_flag(ioc);
339 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
340 complete(&ioc->taskmgmt_cmds.done);
341 return 1;
342 }
343 return 0;
331} 344}
332 345
333/* mptctl_bus_reset 346/* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
335 * Bus reset code. 348 * Bus reset code.
336 * 349 *
337 */ 350 */
338static int mptctl_bus_reset(MPT_IOCTL *ioctl) 351static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
339{ 352{
340 MPT_FRAME_HDR *mf; 353 MPT_FRAME_HDR *mf;
341 SCSITaskMgmt_t *pScsiTm; 354 SCSITaskMgmt_t *pScsiTm;
342 MPT_SCSI_HOST *hd; 355 SCSITaskMgmtReply_t *pScsiTmReply;
343 int ii; 356 int ii;
344 int retval=0; 357 int retval;
345 358 unsigned long timeout;
346 359 unsigned long time_count;
347 ioctl->reset &= ~MPTCTL_RESET_OK; 360 u16 iocstatus;
348 361
349 if (ioctl->ioc->sh == NULL) 362 /* bus reset is only good for SCSI IO, RAID PASSTHRU */
363 if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
364 (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
365 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 "TaskMgmt, not SCSI_IO!!\n", ioc->name));
350 return -EPERM; 367 return -EPERM;
368 }
351 369
352 hd = shost_priv(ioctl->ioc->sh); 370 mutex_lock(&ioc->taskmgmt_cmds.mutex);
353 if (hd == NULL) 371 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
372 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
354 return -EPERM; 373 return -EPERM;
374 }
355 375
356 /* Single threading .... 376 retval = 0;
357 */
358 if (mptctl_set_tm_flags(hd) != 0)
359 return -EPERM;
360 377
361 /* Send request 378 /* Send request
362 */ 379 */
363 if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { 380 mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
364 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", 381 if (mf == NULL) {
365 ioctl->ioc->name)); 382 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 383 "TaskMgmt, no msg frames!!\n", ioc->name));
367 mptctl_free_tm_flags(ioctl->ioc); 384 mpt_clear_taskmgmt_in_progress_flag(ioc);
368 return -ENOMEM; 385 retval = -ENOMEM;
386 goto mptctl_bus_reset_done;
369 } 387 }
370 388
371 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 389 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
372 ioctl->ioc->name, mf)); 390 ioc->name, mf));
373 391
374 pScsiTm = (SCSITaskMgmt_t *) mf; 392 pScsiTm = (SCSITaskMgmt_t *) mf;
375 pScsiTm->TargetID = ioctl->id; 393 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
376 pScsiTm->Bus = hd->port; /* 0 */
377 pScsiTm->ChainOffset = 0;
378 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 394 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
379 pScsiTm->Reserved = 0;
380 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; 395 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
381 pScsiTm->Reserved1 = 0;
382 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; 396 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
383 397 pScsiTm->TargetID = 0;
398 pScsiTm->Bus = 0;
399 pScsiTm->ChainOffset = 0;
400 pScsiTm->Reserved = 0;
401 pScsiTm->Reserved1 = 0;
402 pScsiTm->TaskMsgContext = 0;
384 for (ii= 0; ii < 8; ii++) 403 for (ii= 0; ii < 8; ii++)
385 pScsiTm->LUN[ii] = 0; 404 pScsiTm->LUN[ii] = 0;
386
387 for (ii=0; ii < 7; ii++) 405 for (ii=0; ii < 7; ii++)
388 pScsiTm->Reserved2[ii] = 0; 406 pScsiTm->Reserved2[ii] = 0;
389 407
390 pScsiTm->TaskMsgContext = 0; 408 switch (ioc->bus_type) {
391 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT 409 case FC:
392 "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); 410 timeout = 40;
393 411 break;
394 DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); 412 case SAS:
413 timeout = 30;
414 break;
415 case SPI:
416 default:
417 timeout = 2;
418 break;
419 }
395 420
396 ioctl->wait_done=0; 421 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
422 "TaskMgmt type=%d timeout=%ld\n",
423 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
397 424
398 if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 425 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
399 (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 426 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
400 mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); 427 time_count = jiffies;
428 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
429 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
430 mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
401 else { 431 else {
402 retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, 432 retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
403 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 433 sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
404 if (retval != 0) { 434 if (retval != 0) {
405 dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" 435 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
406 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, 436 "TaskMgmt send_handshake FAILED!"
407 hd->ioc, mf)); 437 " (ioc %p, mf %p, rc=%d) \n", ioc->name,
438 ioc, mf, retval));
439 mpt_clear_taskmgmt_in_progress_flag(ioc);
408 goto mptctl_bus_reset_done; 440 goto mptctl_bus_reset_done;
409 } 441 }
410 } 442 }
411 443
412 /* Now wait for the command to complete */ 444 /* Now wait for the command to complete */
413 ii = wait_event_timeout(mptctl_wait, 445 ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
414 ioctl->wait_done == 1, 446 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
415 HZ*5 /* 5 second timeout */); 447 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
448 "TaskMgmt failed\n", ioc->name));
449 mpt_free_msg_frame(ioc, mf);
450 mpt_clear_taskmgmt_in_progress_flag(ioc);
451 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
452 retval = 0;
453 else
454 retval = -1; /* return failure */
455 goto mptctl_bus_reset_done;
456 }
416 457
417 if(ii <=0 && (ioctl->wait_done != 1 )) { 458 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
418 mpt_free_msg_frame(hd->ioc, mf); 459 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
419 ioctl->wait_done = 0; 460 "TaskMgmt failed\n", ioc->name));
461 retval = -1; /* return failure */
462 goto mptctl_bus_reset_done;
463 }
464
465 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
466 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
467 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
468 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
469 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
470 pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
471 le16_to_cpu(pScsiTmReply->IOCStatus),
472 le32_to_cpu(pScsiTmReply->IOCLogInfo),
473 pScsiTmReply->ResponseCode,
474 le32_to_cpu(pScsiTmReply->TerminationCount)));
475
476 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
477
478 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
479 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
480 iocstatus == MPI_IOCSTATUS_SUCCESS)
481 retval = 0;
482 else {
483 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
484 "TaskMgmt failed\n", ioc->name));
420 retval = -1; /* return failure */ 485 retval = -1; /* return failure */
421 } 486 }
422 487
423mptctl_bus_reset_done:
424 488
425 mptctl_free_tm_flags(ioctl->ioc); 489 mptctl_bus_reset_done:
490 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
491 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
426 return retval; 492 return retval;
427} 493}
428 494
429static int
430mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
431 unsigned long flags;
432
433 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
434
435 if (hd->tmState == TM_STATE_NONE) {
436 hd->tmState = TM_STATE_IN_PROGRESS;
437 hd->tmPending = 1;
438 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
439 } else {
440 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
441 return -EBUSY;
442 }
443
444 return 0;
445}
446
447static void
448mptctl_free_tm_flags(MPT_ADAPTER *ioc)
449{
450 MPT_SCSI_HOST * hd;
451 unsigned long flags;
452
453 hd = shost_priv(ioc->sh);
454 if (hd == NULL)
455 return;
456
457 spin_lock_irqsave(&ioc->FreeQlock, flags);
458
459 hd->tmState = TM_STATE_NONE;
460 hd->tmPending = 0;
461 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
462
463 return;
464}
465 495
466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 496/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
467/* mptctl_ioc_reset 497/* mptctl_ioc_reset
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
473static int 503static int
474mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 504mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
475{ 505{
476 MPT_IOCTL *ioctl = ioc->ioctl;
477 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
478 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
479 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
480
481 if(ioctl == NULL)
482 return 1;
483
484 switch(reset_phase) { 506 switch(reset_phase) {
485 case MPT_IOC_SETUP_RESET: 507 case MPT_IOC_SETUP_RESET:
486 ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; 508 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
509 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
510 break;
511 case MPT_IOC_PRE_RESET:
512 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
513 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
487 break; 514 break;
488 case MPT_IOC_POST_RESET: 515 case MPT_IOC_POST_RESET:
489 ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; 516 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
517 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
518 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
519 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
520 complete(&ioc->ioctl_cmds.done);
521 }
490 break; 522 break;
491 case MPT_IOC_PRE_RESET:
492 default: 523 default:
493 break; 524 break;
494 } 525 }
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 else 673 else
643 ret = -EINVAL; 674 ret = -EINVAL;
644 675
645 mutex_unlock(&iocp->ioctl->ioctl_mutex); 676 mutex_unlock(&iocp->ioctl_cmds.mutex);
646 677
647 return ret; 678 return ret;
648} 679}
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
758 int sge_offset = 0; 789 int sge_offset = 0;
759 u16 iocstat; 790 u16 iocstat;
760 pFWDownloadReply_t ReplyMsg = NULL; 791 pFWDownloadReply_t ReplyMsg = NULL;
792 unsigned long timeleft;
761 793
762 if (mpt_verify_adapter(ioc, &iocp) < 0) { 794 if (mpt_verify_adapter(ioc, &iocp) < 0) {
763 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", 795 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
841 * 96 8 873 * 96 8
842 * 64 4 874 * 64 4
843 */ 875 */
844 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) 876 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
845 / (sizeof(dma_addr_t) + sizeof(u32)); 877 sizeof(FWDownloadTCSGE_t))
878 / iocp->SGE_size;
846 if (numfrags > maxfrags) { 879 if (numfrags > maxfrags) {
847 ret = -EMLINK; 880 ret = -EMLINK;
848 goto fwdl_out; 881 goto fwdl_out;
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
870 if (nib == 0 || nib == 3) { 903 if (nib == 0 || nib == 3) {
871 ; 904 ;
872 } else if (sgIn->Address) { 905 } else if (sgIn->Address) {
873 mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); 906 iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
874 n++; 907 n++;
875 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { 908 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
876 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " 909 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
882 } 915 }
883 sgIn++; 916 sgIn++;
884 bl++; 917 bl++;
885 sgOut += (sizeof(dma_addr_t) + sizeof(u32)); 918 sgOut += iocp->SGE_size;
886 } 919 }
887 920
888 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); 921 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
891 * Finally, perform firmware download. 924 * Finally, perform firmware download.
892 */ 925 */
893 ReplyMsg = NULL; 926 ReplyMsg = NULL;
927 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
928 INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
894 mpt_put_msg_frame(mptctl_id, iocp, mf); 929 mpt_put_msg_frame(mptctl_id, iocp, mf);
895 930
896 /* Now wait for the command to complete */ 931 /* Now wait for the command to complete */
897 ret = wait_event_timeout(mptctl_wait, 932retry_wait:
898 iocp->ioctl->wait_done == 1, 933 timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
899 HZ*60); 934 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
935 ret = -ETIME;
936 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
937 if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
938 mpt_free_msg_frame(iocp, mf);
939 goto fwdl_out;
940 }
941 if (!timeleft)
942 mptctl_timeout_expired(iocp, mf);
943 else
944 goto retry_wait;
945 goto fwdl_out;
946 }
900 947
901 if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { 948 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
902 /* Now we need to reset the board */ 949 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
903 mptctl_timeout_expired(iocp->ioctl); 950 mpt_free_msg_frame(iocp, mf);
904 ret = -ENODATA; 951 ret = -ENODATA;
905 goto fwdl_out; 952 goto fwdl_out;
906 } 953 }
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
908 if (sgl) 955 if (sgl)
909 kfree_sgl(sgl, sgl_dma, buflist, iocp); 956 kfree_sgl(sgl, sgl_dma, buflist, iocp);
910 957
911 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; 958 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
912 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; 959 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
913 if (iocstat == MPI_IOCSTATUS_SUCCESS) { 960 if (iocstat == MPI_IOCSTATUS_SUCCESS) {
914 printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name); 961 printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name);
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
932 return 0; 979 return 0;
933 980
934fwdl_out: 981fwdl_out:
982
983 CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
984 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
935 kfree_sgl(sgl, sgl_dma, buflist, iocp); 985 kfree_sgl(sgl, sgl_dma, buflist, iocp);
936 return ret; 986 return ret;
937} 987}
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1003 * 1053 *
1004 */ 1054 */
1005 sgl = sglbuf; 1055 sgl = sglbuf;
1006 sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; 1056 sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
1007 while (bytes_allocd < bytes) { 1057 while (bytes_allocd < bytes) {
1008 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1058 this_alloc = min(alloc_sz, bytes-bytes_allocd);
1009 buflist[buflist_ent].len = this_alloc; 1059 buflist[buflist_ent].len = this_alloc;
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1024 dma_addr_t dma_addr; 1074 dma_addr_t dma_addr;
1025 1075
1026 bytes_allocd += this_alloc; 1076 bytes_allocd += this_alloc;
1027 sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); 1077 sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
1028 dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); 1078 dma_addr = pci_map_single(ioc->pcidev,
1079 buflist[buflist_ent].kptr, this_alloc, dir);
1029 sgl->Address = dma_addr; 1080 sgl->Address = dma_addr;
1030 1081
1031 fragcnt++; 1082 fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1771 int msgContext; 1822 int msgContext;
1772 u16 req_idx; 1823 u16 req_idx;
1773 ulong timeout; 1824 ulong timeout;
1825 unsigned long timeleft;
1774 struct scsi_device *sdev; 1826 struct scsi_device *sdev;
1827 unsigned long flags;
1828 u8 function;
1775 1829
1776 /* bufIn and bufOut are used for user to kernel space transfers 1830 /* bufIn and bufOut are used for user to kernel space transfers
1777 */ 1831 */
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1784 __FILE__, __LINE__, iocnum); 1838 __FILE__, __LINE__, iocnum);
1785 return -ENODEV; 1839 return -ENODEV;
1786 } 1840 }
1787 if (!ioc->ioctl) { 1841
1788 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1842 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1789 "No memory available during driver init.\n", 1843 if (ioc->ioc_reset_in_progress) {
1790 __FILE__, __LINE__); 1844 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1791 return -ENOMEM;
1792 } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
1793 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1845 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
1794 "Busy with IOC Reset \n", __FILE__, __LINE__); 1846 "Busy with diagnostic reset\n", __FILE__, __LINE__);
1795 return -EBUSY; 1847 return -EBUSY;
1796 } 1848 }
1849 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1797 1850
1798 /* Verify that the final request frame will not be too large. 1851 /* Verify that the final request frame will not be too large.
1799 */ 1852 */
1800 sz = karg.dataSgeOffset * 4; 1853 sz = karg.dataSgeOffset * 4;
1801 if (karg.dataInSize > 0) 1854 if (karg.dataInSize > 0)
1802 sz += sizeof(dma_addr_t) + sizeof(u32); 1855 sz += ioc->SGE_size;
1803 if (karg.dataOutSize > 0) 1856 if (karg.dataOutSize > 0)
1804 sz += sizeof(dma_addr_t) + sizeof(u32); 1857 sz += ioc->SGE_size;
1805 1858
1806 if (sz > ioc->req_sz) { 1859 if (sz > ioc->req_sz) {
1807 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1860 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1827 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1880 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
1828 "Unable to read MF from mpt_ioctl_command struct @ %p\n", 1881 "Unable to read MF from mpt_ioctl_command struct @ %p\n",
1829 ioc->name, __FILE__, __LINE__, mfPtr); 1882 ioc->name, __FILE__, __LINE__, mfPtr);
1883 function = -1;
1830 rc = -EFAULT; 1884 rc = -EFAULT;
1831 goto done_free_mem; 1885 goto done_free_mem;
1832 } 1886 }
1833 hdr->MsgContext = cpu_to_le32(msgContext); 1887 hdr->MsgContext = cpu_to_le32(msgContext);
1888 function = hdr->Function;
1834 1889
1835 1890
1836 /* Verify that this request is allowed. 1891 /* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1838 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", 1893 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
1839 ioc->name, hdr->Function, mf)); 1894 ioc->name, hdr->Function, mf));
1840 1895
1841 switch (hdr->Function) { 1896 switch (function) {
1842 case MPI_FUNCTION_IOC_FACTS: 1897 case MPI_FUNCTION_IOC_FACTS:
1843 case MPI_FUNCTION_PORT_FACTS: 1898 case MPI_FUNCTION_PORT_FACTS:
1844 karg.dataOutSize = karg.dataInSize = 0; 1899 karg.dataOutSize = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1893 } 1948 }
1894 1949
1895 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1950 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1896 pScsiReq->MsgFlags |= mpt_msg_flags(); 1951 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1897 1952
1898 1953
1899 /* verify that app has not requested 1954 /* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1935 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 1990 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1936 pScsiReq->DataLength = cpu_to_le32(dataSize); 1991 pScsiReq->DataLength = cpu_to_le32(dataSize);
1937 1992
1938 ioc->ioctl->reset = MPTCTL_RESET_OK;
1939 ioc->ioctl->id = pScsiReq->TargetID;
1940 1993
1941 } else { 1994 } else {
1942 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1995 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1979 int dataSize; 2032 int dataSize;
1980 2033
1981 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 2034 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1982 pScsiReq->MsgFlags |= mpt_msg_flags(); 2035 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1983 2036
1984 2037
1985 /* verify that app has not requested 2038 /* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2014 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 2067 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
2015 pScsiReq->DataLength = cpu_to_le32(dataSize); 2068 pScsiReq->DataLength = cpu_to_le32(dataSize);
2016 2069
2017 ioc->ioctl->reset = MPTCTL_RESET_OK;
2018 ioc->ioctl->id = pScsiReq->TargetID;
2019 } else { 2070 } else {
2020 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2071 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2021 "SCSI driver is not loaded. \n", 2072 "SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2026 break; 2077 break;
2027 2078
2028 case MPI_FUNCTION_SCSI_TASK_MGMT: 2079 case MPI_FUNCTION_SCSI_TASK_MGMT:
2029 { 2080 {
2030 MPT_SCSI_HOST *hd = NULL; 2081 SCSITaskMgmt_t *pScsiTm;
2031 if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { 2082 pScsiTm = (SCSITaskMgmt_t *)mf;
2032 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2083 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2033 "SCSI driver not loaded or SCSI host not found. \n", 2084 "\tTaskType=0x%x MsgFlags=0x%x "
2034 ioc->name, __FILE__, __LINE__); 2085 "TaskMsgContext=0x%x id=%d channel=%d\n",
2035 rc = -EFAULT; 2086 ioc->name, pScsiTm->TaskType, le32_to_cpu
2036 goto done_free_mem; 2087 (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
2037 } else if (mptctl_set_tm_flags(hd) != 0) { 2088 pScsiTm->TargetID, pScsiTm->Bus));
2038 rc = -EPERM;
2039 goto done_free_mem;
2040 }
2041 }
2042 break; 2089 break;
2090 }
2043 2091
2044 case MPI_FUNCTION_IOC_INIT: 2092 case MPI_FUNCTION_IOC_INIT:
2045 { 2093 {
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2123 if (karg.dataInSize > 0) { 2171 if (karg.dataInSize > 0) {
2124 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2172 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2125 MPI_SGE_FLAGS_END_OF_BUFFER | 2173 MPI_SGE_FLAGS_END_OF_BUFFER |
2126 MPI_SGE_FLAGS_DIRECTION | 2174 MPI_SGE_FLAGS_DIRECTION)
2127 mpt_addr_size() )
2128 << MPI_SGE_FLAGS_SHIFT; 2175 << MPI_SGE_FLAGS_SHIFT;
2129 } else { 2176 } else {
2130 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; 2177 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2141 /* Set up this SGE. 2188 /* Set up this SGE.
2142 * Copy to MF and to sglbuf 2189 * Copy to MF and to sglbuf
2143 */ 2190 */
2144 mpt_add_sge(psge, flagsLength, dma_addr_out); 2191 ioc->add_sge(psge, flagsLength, dma_addr_out);
2145 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2192 psge += ioc->SGE_size;
2146 2193
2147 /* Copy user data to kernel space. 2194 /* Copy user data to kernel space.
2148 */ 2195 */
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2175 /* Set up this SGE 2222 /* Set up this SGE
2176 * Copy to MF and to sglbuf 2223 * Copy to MF and to sglbuf
2177 */ 2224 */
2178 mpt_add_sge(psge, flagsLength, dma_addr_in); 2225 ioc->add_sge(psge, flagsLength, dma_addr_in);
2179 } 2226 }
2180 } 2227 }
2181 } else { 2228 } else {
2182 /* Add a NULL SGE 2229 /* Add a NULL SGE
2183 */ 2230 */
2184 mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); 2231 ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
2185 } 2232 }
2186 2233
2187 ioc->ioctl->wait_done = 0; 2234 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
2235 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2188 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { 2236 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
2189 2237
2238 mutex_lock(&ioc->taskmgmt_cmds.mutex);
2239 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
2240 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2241 goto done_free_mem;
2242 }
2243
2190 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 2244 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
2191 2245
2192 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 2246 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2197 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); 2251 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
2198 if (rc != 0) { 2252 if (rc != 0) {
2199 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2253 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2200 "_send_handshake FAILED! (ioc %p, mf %p)\n", 2254 "send_handshake FAILED! (ioc %p, mf %p)\n",
2201 ioc->name, ioc, mf)); 2255 ioc->name, ioc, mf));
2202 mptctl_free_tm_flags(ioc); 2256 mpt_clear_taskmgmt_in_progress_flag(ioc);
2203 rc = -ENODATA; 2257 rc = -ENODATA;
2258 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2204 goto done_free_mem; 2259 goto done_free_mem;
2205 } 2260 }
2206 } 2261 }
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2210 2265
2211 /* Now wait for the command to complete */ 2266 /* Now wait for the command to complete */
2212 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2267 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2213 timeout = wait_event_timeout(mptctl_wait, 2268retry_wait:
2214 ioc->ioctl->wait_done == 1, 2269 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2215 HZ*timeout); 2270 HZ*timeout);
2216 2271 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2217 if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { 2272 rc = -ETIME;
2218 /* Now we need to reset the board */ 2273 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
2219 2274 ioc->name, __func__));
2220 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) 2275 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2221 mptctl_free_tm_flags(ioc); 2276 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2222 2277 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2223 mptctl_timeout_expired(ioc->ioctl); 2278 goto done_free_mem;
2224 rc = -ENODATA; 2279 }
2280 if (!timeleft) {
2281 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2282 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2283 mptctl_timeout_expired(ioc, mf);
2284 mf = NULL;
2285 } else
2286 goto retry_wait;
2225 goto done_free_mem; 2287 goto done_free_mem;
2226 } 2288 }
2227 2289
2290 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2291 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2292
2293
2228 mf = NULL; 2294 mf = NULL;
2229 2295
2230 /* If a valid reply frame, copy to the user. 2296 /* If a valid reply frame, copy to the user.
2231 * Offset 2: reply length in U32's 2297 * Offset 2: reply length in U32's
2232 */ 2298 */
2233 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { 2299 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
2234 if (karg.maxReplyBytes < ioc->reply_sz) { 2300 if (karg.maxReplyBytes < ioc->reply_sz) {
2235 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); 2301 sz = min(karg.maxReplyBytes,
2302 4*ioc->ioctl_cmds.reply[2]);
2236 } else { 2303 } else {
2237 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); 2304 sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
2238 } 2305 }
2239
2240 if (sz > 0) { 2306 if (sz > 0) {
2241 if (copy_to_user(karg.replyFrameBufPtr, 2307 if (copy_to_user(karg.replyFrameBufPtr,
2242 &ioc->ioctl->ReplyFrame, sz)){ 2308 ioc->ioctl_cmds.reply, sz)){
2243 printk(MYIOC_s_ERR_FMT 2309 printk(MYIOC_s_ERR_FMT
2244 "%s@%d::mptctl_do_mpt_command - " 2310 "%s@%d::mptctl_do_mpt_command - "
2245 "Unable to write out reply frame %p\n", 2311 "Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2252 2318
2253 /* If valid sense data, copy to user. 2319 /* If valid sense data, copy to user.
2254 */ 2320 */
2255 if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { 2321 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
2256 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); 2322 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
2257 if (sz > 0) { 2323 if (sz > 0) {
2258 if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { 2324 if (copy_to_user(karg.senseDataPtr,
2325 ioc->ioctl_cmds.sense, sz)) {
2259 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2326 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2260 "Unable to write sense data to user %p\n", 2327 "Unable to write sense data to user %p\n",
2261 ioc->name, __FILE__, __LINE__, 2328 ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2269 /* If the overall status is _GOOD and data in, copy data 2336 /* If the overall status is _GOOD and data in, copy data
2270 * to user. 2337 * to user.
2271 */ 2338 */
2272 if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && 2339 if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
2273 (karg.dataInSize > 0) && (bufIn.kptr)) { 2340 (karg.dataInSize > 0) && (bufIn.kptr)) {
2274 2341
2275 if (copy_to_user(karg.dataInBufPtr, 2342 if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2284 2351
2285done_free_mem: 2352done_free_mem:
2286 2353
2287 ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | 2354 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2288 MPT_IOCTL_STATUS_SENSE_VALID | 2355 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2289 MPT_IOCTL_STATUS_RF_VALID );
2290 2356
2291 /* Free the allocated memory. 2357 /* Free the allocated memory.
2292 */ 2358 */
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2336 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2402 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2337 MPT_FRAME_HDR *mf = NULL; 2403 MPT_FRAME_HDR *mf = NULL;
2338 MPIHeader_t *mpi_hdr; 2404 MPIHeader_t *mpi_hdr;
2405 unsigned long timeleft;
2406 int retval;
2339 2407
2340 /* Reset long to int. Should affect IA64 and SPARC only 2408 /* Reset long to int. Should affect IA64 and SPARC only
2341 */ 2409 */
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2466 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 2534 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
2467 2535
2468 if (hd && (cim_rev == 1)) { 2536 if (hd && (cim_rev == 1)) {
2469 karg.hard_resets = hd->hard_resets; 2537 karg.hard_resets = ioc->hard_resets;
2470 karg.soft_resets = hd->soft_resets; 2538 karg.soft_resets = ioc->soft_resets;
2471 karg.timeouts = hd->timeouts; 2539 karg.timeouts = ioc->timeouts;
2472 } 2540 }
2473 } 2541 }
2474 2542
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2476 * Gather ISTWI(Industry Standard Two Wire Interface) Data 2544 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2477 */ 2545 */
2478 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2546 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2479 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2547 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
2480 ioc->name,__func__)); 2548 "%s, no msg frames!!\n", ioc->name, __func__));
2481 goto out; 2549 goto out;
2482 } 2550 }
2483 2551
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2498 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2566 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2499 if (!pbuf) 2567 if (!pbuf)
2500 goto out; 2568 goto out;
2501 mpt_add_sge((char *)&IstwiRWRequest->SGL, 2569 ioc->add_sge((char *)&IstwiRWRequest->SGL,
2502 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2570 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2503 2571
2504 ioc->ioctl->wait_done = 0; 2572 retval = 0;
2573 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
2574 IstwiRWRequest->MsgContext);
2575 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2505 mpt_put_msg_frame(mptctl_id, ioc, mf); 2576 mpt_put_msg_frame(mptctl_id, ioc, mf);
2506 2577
2507 rc = wait_event_timeout(mptctl_wait, 2578retry_wait:
2508 ioc->ioctl->wait_done == 1, 2579 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2509 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2580 HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
2510 2581 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2511 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2582 retval = -ETIME;
2512 /* 2583 printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
2513 * Now we need to reset the board 2584 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2514 */ 2585 mpt_free_msg_frame(ioc, mf);
2515 mpt_free_msg_frame(ioc, mf); 2586 goto out;
2516 mptctl_timeout_expired(ioc->ioctl); 2587 }
2588 if (!timeleft)
2589 mptctl_timeout_expired(ioc, mf);
2590 else
2591 goto retry_wait;
2517 goto out; 2592 goto out;
2518 } 2593 }
2519 2594
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2526 * bays have drives in them 2601 * bays have drives in them
2527 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) 2602 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2528 */ 2603 */
2529 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2604 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
2530 karg.rsvd = *(u32 *)pbuf; 2605 karg.rsvd = *(u32 *)pbuf;
2531 2606
2532 out: 2607 out:
2608 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2609 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2610
2533 if (pbuf) 2611 if (pbuf)
2534 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2612 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2535 2613
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
2753 2831
2754 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); 2832 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
2755 2833
2756 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2834 mutex_unlock(&iocp->ioctl_cmds.mutex);
2757 2835
2758 return ret; 2836 return ret;
2759} 2837}
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
2807 */ 2885 */
2808 ret = mptctl_do_mpt_command (karg, &uarg->MF); 2886 ret = mptctl_do_mpt_command (karg, &uarg->MF);
2809 2887
2810 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2888 mutex_unlock(&iocp->ioctl_cmds.mutex);
2811 2889
2812 return ret; 2890 return ret;
2813} 2891}
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
2859static int 2937static int
2860mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2938mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2861{ 2939{
2862 MPT_IOCTL *mem;
2863 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2940 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2864 2941
2865 /* 2942 mutex_init(&ioc->ioctl_cmds.mutex);
2866 * Allocate and inite a MPT_IOCTL structure 2943 init_completion(&ioc->ioctl_cmds.done);
2867 */
2868 mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
2869 if (!mem) {
2870 mptctl_remove(pdev);
2871 return -ENOMEM;
2872 }
2873
2874 ioc->ioctl = mem;
2875 ioc->ioctl->ioc = ioc;
2876 mutex_init(&ioc->ioctl->ioctl_mutex);
2877 return 0; 2944 return 0;
2878} 2945}
2879 2946
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2887static void 2954static void
2888mptctl_remove(struct pci_dev *pdev) 2955mptctl_remove(struct pci_dev *pdev)
2889{ 2956{
2890 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2891
2892 kfree ( ioc->ioctl );
2893} 2957}
2894 2958
2895static struct mpt_pci_driver mptctl_driver = { 2959static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
2929 goto out_fail; 2993 goto out_fail;
2930 } 2994 }
2931 2995
2996 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
2932 mpt_reset_register(mptctl_id, mptctl_ioc_reset); 2997 mpt_reset_register(mptctl_id, mptctl_ioc_reset);
2933 mpt_event_register(mptctl_id, mptctl_event_process); 2998 mpt_event_register(mptctl_id, mptctl_event_process);
2934 2999
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
2953 3018
2954 /* De-register callback handler from base module */ 3019 /* De-register callback handler from base module */
2955 mpt_deregister(mptctl_id); 3020 mpt_deregister(mptctl_id);
3021 mpt_reset_deregister(mptctl_taskmgmt_id);
2956 3022
2957 mpt_device_driver_deregister(MPTCTL_DRIVER); 3023 mpt_device_driver_deregister(MPTCTL_DRIVER);
2958 3024
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 510b9f492093..28e478879284 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -58,6 +58,7 @@
58#define MPT_DEBUG_FC 0x00080000 58#define MPT_DEBUG_FC 0x00080000
59#define MPT_DEBUG_SAS 0x00100000 59#define MPT_DEBUG_SAS 0x00100000
60#define MPT_DEBUG_SAS_WIDE 0x00200000 60#define MPT_DEBUG_SAS_WIDE 0x00200000
61#define MPT_DEBUG_36GB_MEM 0x00400000
61 62
62/* 63/*
63 * CONFIG_FUSION_LOGGING - enabled in Kconfig 64 * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
135#define dsaswideprintk(IOC, CMD) \ 136#define dsaswideprintk(IOC, CMD) \
136 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) 137 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
137 138
139#define d36memprintk(IOC, CMD) \
140 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
138 141
139 142
140/* 143/*
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c3c24fdf9fb6..e61df133a59e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1251 * A slightly different algorithm is required for 1251 * A slightly different algorithm is required for
1252 * 64bit SGEs. 1252 * 64bit SGEs.
1253 */ 1253 */
1254 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1254 scale = ioc->req_sz/ioc->SGE_size;
1255 if (sizeof(dma_addr_t) == sizeof(u64)) { 1255 if (ioc->sg_addr_size == sizeof(u64)) {
1256 numSGE = (scale - 1) * 1256 numSGE = (scale - 1) *
1257 (ioc->facts.MaxChainDepth-1) + scale + 1257 (ioc->facts.MaxChainDepth-1) + scale +
1258 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1258 (ioc->req_sz - 60) / ioc->SGE_size;
1259 sizeof(u32));
1260 } else { 1259 } else {
1261 numSGE = 1 + (scale - 1) * 1260 numSGE = 1 + (scale - 1) *
1262 (ioc->facts.MaxChainDepth-1) + scale + 1261 (ioc->facts.MaxChainDepth-1) + scale +
1263 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1262 (ioc->req_sz - 64) / ioc->SGE_size;
1264 sizeof(u32));
1265 } 1263 }
1266 1264
1267 if (numSGE < sh->sg_tablesize) { 1265 if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1292 1290
1293 /* Clear the TM flags 1291 /* Clear the TM flags
1294 */ 1292 */
1295 hd->tmPending = 0;
1296 hd->tmState = TM_STATE_NONE;
1297 hd->resetPending = 0;
1298 hd->abortSCpnt = NULL; 1293 hd->abortSCpnt = NULL;
1299 1294
1300 /* Clear the pointer used to store 1295 /* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1312 hd->timer.data = (unsigned long) hd; 1307 hd->timer.data = (unsigned long) hd;
1313 hd->timer.function = mptscsih_timer_expired; 1308 hd->timer.function = mptscsih_timer_expired;
1314 1309
1315 init_waitqueue_head(&hd->scandv_waitq);
1316 hd->scandv_wait_done = 0;
1317 hd->last_queue_full = 0; 1310 hd->last_queue_full = 0;
1318 1311
1319 sh->transportt = mptfc_transport_template; 1312 sh->transportt = mptfc_transport_template;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 79f5433359f9..20e0b447e8e8 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -93,8 +93,37 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; 93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ 94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; 95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
96 96static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
97static void mptsas_hotplug_work(struct work_struct *work); 97
98static void mptsas_firmware_event_work(struct work_struct *work);
99static void mptsas_send_sas_event(struct fw_event_work *fw_event);
100static void mptsas_send_raid_event(struct fw_event_work *fw_event);
101static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
102static void mptsas_parse_device_info(struct sas_identify *identify,
103 struct mptsas_devinfo *device_info);
104static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
105 struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
106static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
107 (MPT_ADAPTER *ioc, u64 sas_address);
108static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
109 struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
110static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
111 struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
112static int mptsas_add_end_device(MPT_ADAPTER *ioc,
113 struct mptsas_phyinfo *phy_info);
114static void mptsas_del_end_device(MPT_ADAPTER *ioc,
115 struct mptsas_phyinfo *phy_info);
116static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
117static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
118 (MPT_ADAPTER *ioc, u64 sas_address);
119static void mptsas_expander_delete(MPT_ADAPTER *ioc,
120 struct mptsas_portinfo *port_info, u8 force);
121static void mptsas_send_expander_event(struct fw_event_work *fw_event);
122static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
123static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
124static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
125static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
126static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
98 127
99static void mptsas_print_phy_data(MPT_ADAPTER *ioc, 128static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
100 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 129 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
218 le16_to_cpu(pg1->AttachedDevHandle))); 247 le16_to_cpu(pg1->AttachedDevHandle)));
219} 248}
220 249
221static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) 250/* inhibit sas firmware event handling */
251static void
252mptsas_fw_event_off(MPT_ADAPTER *ioc)
222{ 253{
223 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 254 unsigned long flags;
224 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 255
256 spin_lock_irqsave(&ioc->fw_event_lock, flags);
257 ioc->fw_events_off = 1;
258 ioc->sas_discovery_quiesce_io = 0;
259 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
260
225} 261}
226 262
227static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) 263/* enable sas firmware event handling */
264static void
265mptsas_fw_event_on(MPT_ADAPTER *ioc)
228{ 266{
229 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); 267 unsigned long flags;
230 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 268
269 spin_lock_irqsave(&ioc->fw_event_lock, flags);
270 ioc->fw_events_off = 0;
271 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
231} 272}
232 273
233static struct mptsas_portinfo * 274/* queue a sas firmware event */
234mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) 275static void
276mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
277 unsigned long delay)
235{ 278{
236 struct list_head *head = &ioc->sas_topology; 279 unsigned long flags;
237 struct mptsas_portinfo *pi = NULL; 280
281 spin_lock_irqsave(&ioc->fw_event_lock, flags);
282 list_add_tail(&fw_event->list, &ioc->fw_event_list);
283 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
284 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
285 ioc->name, __func__, fw_event));
286 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
287 delay);
288 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
289}
290
291/* requeue a sas firmware event */
292static void
293mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
294 unsigned long delay)
295{
296 unsigned long flags;
297 spin_lock_irqsave(&ioc->fw_event_lock, flags);
298 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
299 "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
300 fw_event->retries++;
301 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
302 msecs_to_jiffies(delay));
303 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
304}
305
306/* free memory assoicated to a sas firmware event */
307static void
308mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
309{
310 unsigned long flags;
311
312 spin_lock_irqsave(&ioc->fw_event_lock, flags);
313 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
314 ioc->name, __func__, fw_event));
315 list_del(&fw_event->list);
316 kfree(fw_event);
317 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
318}
319
320/* walk the firmware event queue, and either stop or wait for
321 * outstanding events to complete */
322static void
323mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
324{
325 struct fw_event_work *fw_event, *next;
326 struct mptsas_target_reset_event *target_reset_list, *n;
327 u8 flush_q;
328 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
329
330 /* flush the target_reset_list */
331 if (!list_empty(&hd->target_reset_list)) {
332 list_for_each_entry_safe(target_reset_list, n,
333 &hd->target_reset_list, list) {
334 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
335 "%s: removing target reset for id=%d\n",
336 ioc->name, __func__,
337 target_reset_list->sas_event_data.TargetID));
338 list_del(&target_reset_list->list);
339 kfree(target_reset_list);
340 }
341 }
342
343 if (list_empty(&ioc->fw_event_list) ||
344 !ioc->fw_event_q || in_interrupt())
345 return;
238 346
239 /* always the first entry on sas_topology list */ 347 flush_q = 0;
348 list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
349 if (cancel_delayed_work(&fw_event->work))
350 mptsas_free_fw_event(ioc, fw_event);
351 else
352 flush_q = 1;
353 }
354 if (flush_q)
355 flush_workqueue(ioc->fw_event_q);
356}
240 357
241 if (!list_empty(head))
242 pi = list_entry(head->next, struct mptsas_portinfo, list);
243 358
244 return pi; 359static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
360{
361 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
362 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
363}
364
365static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
366{
367 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
368 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
245} 369}
246 370
247/* 371/*
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
265 return rc; 389 return rc;
266} 390}
267 391
392/**
393 * mptsas_find_portinfo_by_sas_address -
394 * @ioc: Pointer to MPT_ADAPTER structure
395 * @handle:
396 *
397 * This function should be called with the sas_topology_mutex already held
398 *
399 **/
400static struct mptsas_portinfo *
401mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
402{
403 struct mptsas_portinfo *port_info, *rc = NULL;
404 int i;
405
406 if (sas_address >= ioc->hba_port_sas_addr &&
407 sas_address < (ioc->hba_port_sas_addr +
408 ioc->hba_port_num_phy))
409 return ioc->hba_port_info;
410
411 mutex_lock(&ioc->sas_topology_mutex);
412 list_for_each_entry(port_info, &ioc->sas_topology, list)
413 for (i = 0; i < port_info->num_phys; i++)
414 if (port_info->phy_info[i].identify.sas_address ==
415 sas_address) {
416 rc = port_info;
417 goto out;
418 }
419 out:
420 mutex_unlock(&ioc->sas_topology_mutex);
421 return rc;
422}
423
268/* 424/*
269 * Returns true if there is a scsi end device 425 * Returns true if there is a scsi end device
270 */ 426 */
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
308 if(phy_info->port_details != port_details) 464 if(phy_info->port_details != port_details)
309 continue; 465 continue;
310 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 466 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
467 mptsas_set_rphy(ioc, phy_info, NULL);
311 phy_info->port_details = NULL; 468 phy_info->port_details = NULL;
312 } 469 }
313 kfree(port_details); 470 kfree(port_details);
@@ -379,6 +536,285 @@ starget)
379 phy_info->port_details->starget = starget; 536 phy_info->port_details->starget = starget;
380} 537}
381 538
539/**
540 * mptsas_add_device_component -
541 * @ioc: Pointer to MPT_ADAPTER structure
542 * @channel: fw mapped id's
543 * @id:
544 * @sas_address:
545 * @device_info:
546 *
547 **/
548static void
549mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
550 u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
551{
552 struct mptsas_device_info *sas_info, *next;
553 struct scsi_device *sdev;
554 struct scsi_target *starget;
555 struct sas_rphy *rphy;
556
557 /*
558 * Delete all matching devices out of the list
559 */
560 mutex_lock(&ioc->sas_device_info_mutex);
561 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
562 list) {
563 if (!sas_info->is_logical_volume &&
564 (sas_info->sas_address == sas_address ||
565 (sas_info->fw.channel == channel &&
566 sas_info->fw.id == id))) {
567 list_del(&sas_info->list);
568 kfree(sas_info);
569 }
570 }
571
572 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
573 if (!sas_info)
574 goto out;
575
576 /*
577 * Set Firmware mapping
578 */
579 sas_info->fw.id = id;
580 sas_info->fw.channel = channel;
581
582 sas_info->sas_address = sas_address;
583 sas_info->device_info = device_info;
584 sas_info->slot = slot;
585 sas_info->enclosure_logical_id = enclosure_logical_id;
586 INIT_LIST_HEAD(&sas_info->list);
587 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
588
589 /*
590 * Set OS mapping
591 */
592 shost_for_each_device(sdev, ioc->sh) {
593 starget = scsi_target(sdev);
594 rphy = dev_to_rphy(starget->dev.parent);
595 if (rphy->identify.sas_address == sas_address) {
596 sas_info->os.id = starget->id;
597 sas_info->os.channel = starget->channel;
598 }
599 }
600
601 out:
602 mutex_unlock(&ioc->sas_device_info_mutex);
603 return;
604}
605
606/**
607 * mptsas_add_device_component_by_fw -
608 * @ioc: Pointer to MPT_ADAPTER structure
609 * @channel: fw mapped id's
610 * @id:
611 *
612 **/
613static void
614mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
615{
616 struct mptsas_devinfo sas_device;
617 struct mptsas_enclosure enclosure_info;
618 int rc;
619
620 rc = mptsas_sas_device_pg0(ioc, &sas_device,
621 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
622 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
623 (channel << 8) + id);
624 if (rc)
625 return;
626
627 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
628 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
629 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
630 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
631 sas_device.handle_enclosure);
632
633 mptsas_add_device_component(ioc, sas_device.channel,
634 sas_device.id, sas_device.sas_address, sas_device.device_info,
635 sas_device.slot, enclosure_info.enclosure_logical_id);
636}
637
638/**
639 * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
640 * @ioc: Pointer to MPT_ADAPTER structure
641 * @channel: fw mapped id's
642 * @id:
643 *
644 **/
645static void
646mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
647 struct scsi_target *starget)
648{
649 CONFIGPARMS cfg;
650 ConfigPageHeader_t hdr;
651 dma_addr_t dma_handle;
652 pRaidVolumePage0_t buffer = NULL;
653 int i;
654 RaidPhysDiskPage0_t phys_disk;
655 struct mptsas_device_info *sas_info, *next;
656
657 memset(&cfg, 0 , sizeof(CONFIGPARMS));
658 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
659 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
660 /* assumption that all volumes on channel = 0 */
661 cfg.pageAddr = starget->id;
662 cfg.cfghdr.hdr = &hdr;
663 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
664 cfg.timeout = 10;
665
666 if (mpt_config(ioc, &cfg) != 0)
667 goto out;
668
669 if (!hdr.PageLength)
670 goto out;
671
672 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
673 &dma_handle);
674
675 if (!buffer)
676 goto out;
677
678 cfg.physAddr = dma_handle;
679 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
680
681 if (mpt_config(ioc, &cfg) != 0)
682 goto out;
683
684 if (!buffer->NumPhysDisks)
685 goto out;
686
687 /*
688 * Adding entry for hidden components
689 */
690 for (i = 0; i < buffer->NumPhysDisks; i++) {
691
692 if (mpt_raid_phys_disk_pg0(ioc,
693 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
694 continue;
695
696 mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
697 phys_disk.PhysDiskID);
698
699 mutex_lock(&ioc->sas_device_info_mutex);
700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
701 list) {
702 if (!sas_info->is_logical_volume &&
703 (sas_info->fw.channel == phys_disk.PhysDiskBus &&
704 sas_info->fw.id == phys_disk.PhysDiskID)) {
705 sas_info->is_hidden_raid_component = 1;
706 sas_info->volume_id = starget->id;
707 }
708 }
709 mutex_unlock(&ioc->sas_device_info_mutex);
710
711 }
712
713 /*
714 * Delete all matching devices out of the list
715 */
716 mutex_lock(&ioc->sas_device_info_mutex);
717 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
718 list) {
719 if (sas_info->is_logical_volume && sas_info->fw.id ==
720 starget->id) {
721 list_del(&sas_info->list);
722 kfree(sas_info);
723 }
724 }
725
726 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
727 if (sas_info) {
728 sas_info->fw.id = starget->id;
729 sas_info->os.id = starget->id;
730 sas_info->os.channel = starget->channel;
731 sas_info->is_logical_volume = 1;
732 INIT_LIST_HEAD(&sas_info->list);
733 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
734 }
735 mutex_unlock(&ioc->sas_device_info_mutex);
736
737 out:
738 if (buffer)
739 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
740 dma_handle);
741}
742
743/**
744 * mptsas_add_device_component_starget -
745 * @ioc: Pointer to MPT_ADAPTER structure
746 * @starget:
747 *
748 **/
749static void
750mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
751 struct scsi_target *starget)
752{
753 VirtTarget *vtarget;
754 struct sas_rphy *rphy;
755 struct mptsas_phyinfo *phy_info = NULL;
756 struct mptsas_enclosure enclosure_info;
757
758 rphy = dev_to_rphy(starget->dev.parent);
759 vtarget = starget->hostdata;
760 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
761 rphy->identify.sas_address);
762 if (!phy_info)
763 return;
764
765 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
766 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
767 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
768 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
769 phy_info->attached.handle_enclosure);
770
771 mptsas_add_device_component(ioc, phy_info->attached.channel,
772 phy_info->attached.id, phy_info->attached.sas_address,
773 phy_info->attached.device_info,
774 phy_info->attached.slot, enclosure_info.enclosure_logical_id);
775}
776
777/**
778 * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
779 * @ioc: Pointer to MPT_ADAPTER structure
780 * @channel: os mapped id's
781 * @id:
782 *
783 **/
784static void
785mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
786{
787 struct mptsas_device_info *sas_info, *next;
788
789 /*
790 * Set is_cached flag
791 */
792 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
793 list) {
794 if (sas_info->os.channel == channel && sas_info->os.id == id)
795 sas_info->is_cached = 1;
796 }
797}
798
799/**
800 * mptsas_del_device_components - Cleaning the list
801 * @ioc: Pointer to MPT_ADAPTER structure
802 *
803 **/
804static void
805mptsas_del_device_components(MPT_ADAPTER *ioc)
806{
807 struct mptsas_device_info *sas_info, *next;
808
809 mutex_lock(&ioc->sas_device_info_mutex);
810 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
811 list) {
812 list_del(&sas_info->list);
813 kfree(sas_info);
814 }
815 mutex_unlock(&ioc->sas_device_info_mutex);
816}
817
382 818
383/* 819/*
384 * mptsas_setup_wide_ports 820 * mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
434 * Forming a port 870 * Forming a port
435 */ 871 */
436 if (!port_details) { 872 if (!port_details) {
437 port_details = kzalloc(sizeof(*port_details), 873 port_details = kzalloc(sizeof(struct
438 GFP_KERNEL); 874 mptsas_portinfo_details), GFP_KERNEL);
439 if (!port_details) 875 if (!port_details)
440 goto out; 876 goto out;
441 port_details->num_phys = 1; 877 port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
523 VirtTarget *vtarget = NULL; 959 VirtTarget *vtarget = NULL;
524 960
525 shost_for_each_device(sdev, ioc->sh) { 961 shost_for_each_device(sdev, ioc->sh) {
526 if ((vdevice = sdev->hostdata) == NULL) 962 vdevice = sdev->hostdata;
963 if ((vdevice == NULL) ||
964 (vdevice->vtarget == NULL))
965 continue;
966 if ((vdevice->vtarget->tflags &
967 MPT_TARGET_FLAGS_RAID_COMPONENT ||
968 vdevice->vtarget->raidVolume))
527 continue; 969 continue;
528 if (vdevice->vtarget->id == id && 970 if (vdevice->vtarget->id == id &&
529 vdevice->vtarget->channel == channel) 971 vdevice->vtarget->channel == channel)
530 vtarget = vdevice->vtarget; 972 vtarget = vdevice->vtarget;
531 } 973 }
532 return vtarget; 974 return vtarget;
533} 975}
534 976
977static void
978mptsas_queue_device_delete(MPT_ADAPTER *ioc,
979 MpiEventDataSasDeviceStatusChange_t *sas_event_data)
980{
981 struct fw_event_work *fw_event;
982 int sz;
983
984 sz = offsetof(struct fw_event_work, event_data) +
985 sizeof(MpiEventDataSasDeviceStatusChange_t);
986 fw_event = kzalloc(sz, GFP_ATOMIC);
987 if (!fw_event) {
988 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
989 ioc->name, __func__, __LINE__);
990 return;
991 }
992 memcpy(fw_event->event_data, sas_event_data,
993 sizeof(MpiEventDataSasDeviceStatusChange_t));
994 fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
995 fw_event->ioc = ioc;
996 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
997}
998
999static void
1000mptsas_queue_rescan(MPT_ADAPTER *ioc)
1001{
1002 struct fw_event_work *fw_event;
1003 int sz;
1004
1005 sz = offsetof(struct fw_event_work, event_data);
1006 fw_event = kzalloc(sz, GFP_ATOMIC);
1007 if (!fw_event) {
1008 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
1009 ioc->name, __func__, __LINE__);
1010 return;
1011 }
1012 fw_event->event = -1;
1013 fw_event->ioc = ioc;
1014 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
1015}
1016
1017
535/** 1018/**
536 * mptsas_target_reset 1019 * mptsas_target_reset
537 * 1020 *
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
550{ 1033{
551 MPT_FRAME_HDR *mf; 1034 MPT_FRAME_HDR *mf;
552 SCSITaskMgmt_t *pScsiTm; 1035 SCSITaskMgmt_t *pScsiTm;
553 1036 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
556 ioc->name,__func__, __LINE__));
557 return 0; 1037 return 0;
1038
1039
1040 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
1041 if (mf == NULL) {
1042 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
1043 "%s, no msg frames @%d!!\n", ioc->name,
1044 __func__, __LINE__));
1045 goto out_fail;
558 } 1046 }
559 1047
1048 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1049 ioc->name, mf));
1050
560 /* Format the Request 1051 /* Format the Request
561 */ 1052 */
562 pScsiTm = (SCSITaskMgmt_t *) mf; 1053 pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
569 1060
570 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 1061 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
571 1062
572 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1063 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1064 "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
1065 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
1066
1067 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
573 1068
574 return 1; 1069 return 1;
1070
1071 out_fail:
1072
1073 mpt_clear_taskmgmt_in_progress_flag(ioc);
1074 return 0;
575} 1075}
576 1076
577/** 1077/**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
602 1102
603 vtarget->deleted = 1; /* block IO */ 1103 vtarget->deleted = 1; /* block IO */
604 1104
605 target_reset_list = kzalloc(sizeof(*target_reset_list), 1105 target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
606 GFP_ATOMIC); 1106 GFP_ATOMIC);
607 if (!target_reset_list) { 1107 if (!target_reset_list) {
608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 1108 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
609 ioc->name,__func__, __LINE__)); 1109 "%s, failed to allocate mem @%d..!!\n",
1110 ioc->name, __func__, __LINE__));
610 return; 1111 return;
611 } 1112 }
612 1113
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
614 sizeof(*sas_event_data)); 1115 sizeof(*sas_event_data));
615 list_add_tail(&target_reset_list->list, &hd->target_reset_list); 1116 list_add_tail(&target_reset_list->list, &hd->target_reset_list);
616 1117
617 if (hd->resetPending) 1118 target_reset_list->time_count = jiffies;
618 return;
619 1119
620 if (mptsas_target_reset(ioc, channel, id)) { 1120 if (mptsas_target_reset(ioc, channel, id)) {
621 target_reset_list->target_reset_issued = 1; 1121 target_reset_list->target_reset_issued = 1;
622 hd->resetPending = 1;
623 } 1122 }
624} 1123}
625 1124
626/** 1125/**
627 * mptsas_dev_reset_complete 1126 * mptsas_taskmgmt_complete - complete SAS task management function
628 * 1127 * @ioc: Pointer to MPT_ADAPTER structure
629 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
630 * enable work queue to finish off removing device from upper layers.
631 * then send next TARGET_RESET in the queue.
632 *
633 * @ioc
634 * 1128 *
1129 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
1130 * queue to finish off removing device from upper layers. then send next
1131 * TARGET_RESET in the queue.
635 **/ 1132 **/
636static void 1133static int
637mptsas_dev_reset_complete(MPT_ADAPTER *ioc) 1134mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
638{ 1135{
639 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 1136 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
640 struct list_head *head = &hd->target_reset_list; 1137 struct list_head *head = &hd->target_reset_list;
641 struct mptsas_target_reset_event *target_reset_list;
642 struct mptsas_hotplug_event *ev;
643 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
644 u8 id, channel; 1138 u8 id, channel;
645 __le64 sas_address; 1139 struct mptsas_target_reset_event *target_reset_list;
1140 SCSITaskMgmtReply_t *pScsiTmReply;
1141
1142 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
1143 "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
1144
1145 pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
1146 if (pScsiTmReply) {
1147 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1148 "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
1149 "\ttask_type = 0x%02X, iocstatus = 0x%04X "
1150 "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
1151 "term_cmnds = %d\n", ioc->name,
1152 pScsiTmReply->Bus, pScsiTmReply->TargetID,
1153 pScsiTmReply->TaskType,
1154 le16_to_cpu(pScsiTmReply->IOCStatus),
1155 le32_to_cpu(pScsiTmReply->IOCLogInfo),
1156 pScsiTmReply->ResponseCode,
1157 le32_to_cpu(pScsiTmReply->TerminationCount)));
1158
1159 if (pScsiTmReply->ResponseCode)
1160 mptscsih_taskmgmt_response_code(ioc,
1161 pScsiTmReply->ResponseCode);
1162 }
1163
1164 if (pScsiTmReply && (pScsiTmReply->TaskType ==
1165 MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
1166 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
1167 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1168 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
1169 memcpy(ioc->taskmgmt_cmds.reply, mr,
1170 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
1171 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
1172 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
1173 complete(&ioc->taskmgmt_cmds.done);
1174 return 1;
1175 }
1176 return 0;
1177 }
1178
1179 mpt_clear_taskmgmt_in_progress_flag(ioc);
646 1180
647 if (list_empty(head)) 1181 if (list_empty(head))
648 return; 1182 return 1;
649 1183
650 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); 1184 target_reset_list = list_entry(head->next,
1185 struct mptsas_target_reset_event, list);
651 1186
652 sas_event_data = &target_reset_list->sas_event_data; 1187 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
653 id = sas_event_data->TargetID; 1188 "TaskMgmt: completed (%d seconds)\n",
654 channel = sas_event_data->Bus; 1189 ioc->name, jiffies_to_msecs(jiffies -
655 hd->resetPending = 0; 1190 target_reset_list->time_count)/1000));
1191
1192 id = pScsiTmReply->TargetID;
1193 channel = pScsiTmReply->Bus;
1194 target_reset_list->time_count = jiffies;
656 1195
657 /* 1196 /*
658 * retry target reset 1197 * retry target reset
659 */ 1198 */
660 if (!target_reset_list->target_reset_issued) { 1199 if (!target_reset_list->target_reset_issued) {
661 if (mptsas_target_reset(ioc, channel, id)) { 1200 if (mptsas_target_reset(ioc, channel, id))
662 target_reset_list->target_reset_issued = 1; 1201 target_reset_list->target_reset_issued = 1;
663 hd->resetPending = 1; 1202 return 1;
664 }
665 return;
666 } 1203 }
667 1204
668 /* 1205 /*
669 * enable work queue to remove device from upper layers 1206 * enable work queue to remove device from upper layers
670 */ 1207 */
671 list_del(&target_reset_list->list); 1208 list_del(&target_reset_list->list);
1209 if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
1210 mptsas_queue_device_delete(ioc,
1211 &target_reset_list->sas_event_data);
672 1212
673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
674 if (!ev) {
675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
676 ioc->name,__func__, __LINE__));
677 return;
678 }
679
680 INIT_WORK(&ev->work, mptsas_hotplug_work);
681 ev->ioc = ioc;
682 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
683 ev->parent_handle =
684 le16_to_cpu(sas_event_data->ParentDevHandle);
685 ev->channel = channel;
686 ev->id =id;
687 ev->phy_id = sas_event_data->PhyNum;
688 memcpy(&sas_address, &sas_event_data->SASAddress,
689 sizeof(__le64));
690 ev->sas_address = le64_to_cpu(sas_address);
691 ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
692 ev->event_type = MPTSAS_DEL_DEVICE;
693 schedule_work(&ev->work);
694 kfree(target_reset_list);
695 1213
696 /* 1214 /*
697 * issue target reset to next device in the queue 1215 * issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
699 1217
700 head = &hd->target_reset_list; 1218 head = &hd->target_reset_list;
701 if (list_empty(head)) 1219 if (list_empty(head))
702 return; 1220 return 1;
703 1221
704 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, 1222 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
705 list); 1223 list);
706 1224
707 sas_event_data = &target_reset_list->sas_event_data; 1225 id = target_reset_list->sas_event_data.TargetID;
708 id = sas_event_data->TargetID; 1226 channel = target_reset_list->sas_event_data.Bus;
709 channel = sas_event_data->Bus; 1227 target_reset_list->time_count = jiffies;
710 1228
711 if (mptsas_target_reset(ioc, channel, id)) { 1229 if (mptsas_target_reset(ioc, channel, id))
712 target_reset_list->target_reset_issued = 1; 1230 target_reset_list->target_reset_issued = 1;
713 hd->resetPending = 1;
714 }
715}
716 1231
717/** 1232 return 1;
718 * mptsas_taskmgmt_complete
719 *
720 * @ioc
721 * @mf
722 * @mr
723 *
724 **/
725static int
726mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
727{
728 mptsas_dev_reset_complete(ioc);
729 return mptscsih_taskmgmt_complete(ioc, mf, mr);
730} 1233}
731 1234
732/** 1235/**
@@ -740,37 +1243,59 @@ static int
740mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 1243mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
741{ 1244{
742 MPT_SCSI_HOST *hd; 1245 MPT_SCSI_HOST *hd;
743 struct mptsas_target_reset_event *target_reset_list, *n;
744 int rc; 1246 int rc;
745 1247
746 rc = mptscsih_ioc_reset(ioc, reset_phase); 1248 rc = mptscsih_ioc_reset(ioc, reset_phase);
1249 if ((ioc->bus_type != SAS) || (!rc))
1250 return rc;
747 1251
748 if (ioc->bus_type != SAS)
749 goto out;
750
751 if (reset_phase != MPT_IOC_POST_RESET)
752 goto out;
753
754 if (!ioc->sh || !ioc->sh->hostdata)
755 goto out;
756 hd = shost_priv(ioc->sh); 1252 hd = shost_priv(ioc->sh);
757 if (!hd->ioc) 1253 if (!hd->ioc)
758 goto out; 1254 goto out;
759 1255
760 if (list_empty(&hd->target_reset_list)) 1256 switch (reset_phase) {
761 goto out; 1257 case MPT_IOC_SETUP_RESET:
762 1258 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
763 /* flush the target_reset_list */ 1259 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
764 list_for_each_entry_safe(target_reset_list, n, 1260 mptsas_fw_event_off(ioc);
765 &hd->target_reset_list, list) { 1261 break;
766 list_del(&target_reset_list->list); 1262 case MPT_IOC_PRE_RESET:
767 kfree(target_reset_list); 1263 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1264 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
1265 break;
1266 case MPT_IOC_POST_RESET:
1267 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1268 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
1269 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1270 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
1271 complete(&ioc->sas_mgmt.done);
1272 }
1273 mptsas_cleanup_fw_event_q(ioc);
1274 mptsas_queue_rescan(ioc);
1275 mptsas_fw_event_on(ioc);
1276 break;
1277 default:
1278 break;
768 } 1279 }
769 1280
770 out: 1281 out:
771 return rc; 1282 return rc;
772} 1283}
773 1284
1285
1286/**
1287 * enum device_state -
1288 * @DEVICE_RETRY: need to retry the TUR
1289 * @DEVICE_ERROR: TUR return error, don't add device
1290 * @DEVICE_READY: device can be added
1291 *
1292 */
1293enum device_state{
1294 DEVICE_RETRY,
1295 DEVICE_ERROR,
1296 DEVICE_READY,
1297};
1298
774static int 1299static int
775mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 1300mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
776 u32 form, u32 form_specific) 1301 u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
836 return error; 1361 return error;
837} 1362}
838 1363
1364/**
1365 * mptsas_add_end_device - report a new end device to sas transport layer
1366 * @ioc: Pointer to MPT_ADAPTER structure
1367 * @phy_info: decribes attached device
1368 *
1369 * return (0) success (1) failure
1370 *
1371 **/
1372static int
1373mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1374{
1375 struct sas_rphy *rphy;
1376 struct sas_port *port;
1377 struct sas_identify identify;
1378 char *ds = NULL;
1379 u8 fw_id;
1380
1381 if (!phy_info) {
1382 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1383 "%s: exit at line=%d\n", ioc->name,
1384 __func__, __LINE__));
1385 return 1;
1386 }
1387
1388 fw_id = phy_info->attached.id;
1389
1390 if (mptsas_get_rphy(phy_info)) {
1391 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1392 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1393 __func__, fw_id, __LINE__));
1394 return 2;
1395 }
1396
1397 port = mptsas_get_port(phy_info);
1398 if (!port) {
1399 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1400 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1401 __func__, fw_id, __LINE__));
1402 return 3;
1403 }
1404
1405 if (phy_info->attached.device_info &
1406 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1407 ds = "ssp";
1408 if (phy_info->attached.device_info &
1409 MPI_SAS_DEVICE_INFO_STP_TARGET)
1410 ds = "stp";
1411 if (phy_info->attached.device_info &
1412 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1413 ds = "sata";
1414
1415 printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
1416 " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
1417 phy_info->attached.channel, phy_info->attached.id,
1418 phy_info->attached.phy_id, (unsigned long long)
1419 phy_info->attached.sas_address);
1420
1421 mptsas_parse_device_info(&identify, &phy_info->attached);
1422 rphy = sas_end_device_alloc(port);
1423 if (!rphy) {
1424 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1425 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1426 __func__, fw_id, __LINE__));
1427 return 5; /* non-fatal: an rphy can be added later */
1428 }
1429
1430 rphy->identify = identify;
1431 if (sas_rphy_add(rphy)) {
1432 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1433 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1434 __func__, fw_id, __LINE__));
1435 sas_rphy_free(rphy);
1436 return 6;
1437 }
1438 mptsas_set_rphy(ioc, phy_info, rphy);
1439 return 0;
1440}
1441
1442/**
1443 * mptsas_del_end_device - report a deleted end device to sas transport layer
1444 * @ioc: Pointer to MPT_ADAPTER structure
1445 * @phy_info: decribes attached device
1446 *
1447 **/
1448static void
1449mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1450{
1451 struct sas_rphy *rphy;
1452 struct sas_port *port;
1453 struct mptsas_portinfo *port_info;
1454 struct mptsas_phyinfo *phy_info_parent;
1455 int i;
1456 char *ds = NULL;
1457 u8 fw_id;
1458 u64 sas_address;
1459
1460 if (!phy_info)
1461 return;
1462
1463 fw_id = phy_info->attached.id;
1464 sas_address = phy_info->attached.sas_address;
1465
1466 if (!phy_info->port_details) {
1467 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1468 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1469 __func__, fw_id, __LINE__));
1470 return;
1471 }
1472 rphy = mptsas_get_rphy(phy_info);
1473 if (!rphy) {
1474 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1475 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1476 __func__, fw_id, __LINE__));
1477 return;
1478 }
1479
1480 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
1481 || phy_info->attached.device_info
1482 & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
1483 || phy_info->attached.device_info
1484 & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
1485 ds = "initiator";
1486 if (phy_info->attached.device_info &
1487 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1488 ds = "ssp";
1489 if (phy_info->attached.device_info &
1490 MPI_SAS_DEVICE_INFO_STP_TARGET)
1491 ds = "stp";
1492 if (phy_info->attached.device_info &
1493 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1494 ds = "sata";
1495
1496 dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
1497 "removing %s device: fw_channel %d, fw_id %d, phy %d,"
1498 "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
1499 phy_info->attached.id, phy_info->attached.phy_id,
1500 (unsigned long long) sas_address);
1501
1502 port = mptsas_get_port(phy_info);
1503 if (!port) {
1504 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1505 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1506 __func__, fw_id, __LINE__));
1507 return;
1508 }
1509 port_info = phy_info->portinfo;
1510 phy_info_parent = port_info->phy_info;
1511 for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
1512 if (!phy_info_parent->phy)
1513 continue;
1514 if (phy_info_parent->attached.sas_address !=
1515 sas_address)
1516 continue;
1517 dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
1518 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
1519 ioc->name, phy_info_parent->phy_id,
1520 phy_info_parent->phy);
1521 sas_port_delete_phy(port, phy_info_parent->phy);
1522 }
1523
1524 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
1525 "delete port %d, sas_addr (0x%llx)\n", ioc->name,
1526 port->port_identifier, (unsigned long long)sas_address);
1527 sas_port_delete(port);
1528 mptsas_set_port(ioc, phy_info, NULL);
1529 mptsas_port_delete(ioc, phy_info->port_details);
1530}
1531
1532struct mptsas_phyinfo *
1533mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
1534 struct mptsas_devinfo *sas_device)
1535{
1536 struct mptsas_phyinfo *phy_info;
1537 struct mptsas_portinfo *port_info;
1538 int i;
1539
1540 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
1541 sas_device->sas_address);
1542 if (!phy_info)
1543 goto out;
1544 port_info = phy_info->portinfo;
1545 if (!port_info)
1546 goto out;
1547 mutex_lock(&ioc->sas_topology_mutex);
1548 for (i = 0; i < port_info->num_phys; i++) {
1549 if (port_info->phy_info[i].attached.sas_address !=
1550 sas_device->sas_address)
1551 continue;
1552 port_info->phy_info[i].attached.channel = sas_device->channel;
1553 port_info->phy_info[i].attached.id = sas_device->id;
1554 port_info->phy_info[i].attached.sas_address =
1555 sas_device->sas_address;
1556 port_info->phy_info[i].attached.handle = sas_device->handle;
1557 port_info->phy_info[i].attached.handle_parent =
1558 sas_device->handle_parent;
1559 port_info->phy_info[i].attached.handle_enclosure =
1560 sas_device->handle_enclosure;
1561 }
1562 mutex_unlock(&ioc->sas_topology_mutex);
1563 out:
1564 return phy_info;
1565}
1566
1567/**
1568 * mptsas_firmware_event_work - work thread for processing fw events
1569 * @work: work queue payload containing info describing the event
1570 * Context: user
1571 *
1572 */
1573static void
1574mptsas_firmware_event_work(struct work_struct *work)
1575{
1576 struct fw_event_work *fw_event =
1577 container_of(work, struct fw_event_work, work.work);
1578 MPT_ADAPTER *ioc = fw_event->ioc;
1579
1580 /* special rescan topology handling */
1581 if (fw_event->event == -1) {
1582 if (ioc->in_rescan) {
1583 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1584 "%s: rescan ignored as it is in progress\n",
1585 ioc->name, __func__));
1586 return;
1587 }
1588 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
1589 "reset\n", ioc->name, __func__));
1590 ioc->in_rescan = 1;
1591 mptsas_not_responding_devices(ioc);
1592 mptsas_scan_sas_topology(ioc);
1593 ioc->in_rescan = 0;
1594 mptsas_free_fw_event(ioc, fw_event);
1595 return;
1596 }
1597
1598 /* events handling turned off during host reset */
1599 if (ioc->fw_events_off) {
1600 mptsas_free_fw_event(ioc, fw_event);
1601 return;
1602 }
1603
1604 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
1605 "event = (0x%02x)\n", ioc->name, __func__, fw_event,
1606 (fw_event->event & 0xFF)));
1607
1608 switch (fw_event->event) {
1609 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1610 mptsas_send_sas_event(fw_event);
1611 break;
1612 case MPI_EVENT_INTEGRATED_RAID:
1613 mptsas_send_raid_event(fw_event);
1614 break;
1615 case MPI_EVENT_IR2:
1616 mptsas_send_ir2_event(fw_event);
1617 break;
1618 case MPI_EVENT_PERSISTENT_TABLE_FULL:
1619 mptbase_sas_persist_operation(ioc,
1620 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1621 mptsas_free_fw_event(ioc, fw_event);
1622 break;
1623 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
1624 mptsas_broadcast_primative_work(fw_event);
1625 break;
1626 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
1627 mptsas_send_expander_event(fw_event);
1628 break;
1629 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1630 mptsas_send_link_status_event(fw_event);
1631 break;
1632 case MPI_EVENT_QUEUE_FULL:
1633 mptsas_handle_queue_full_event(fw_event);
1634 break;
1635 }
1636}
1637
1638
1639
839static int 1640static int
840mptsas_slave_configure(struct scsi_device *sdev) 1641mptsas_slave_configure(struct scsi_device *sdev)
841{ 1642{
1643 struct Scsi_Host *host = sdev->host;
1644 MPT_SCSI_HOST *hd = shost_priv(host);
1645 MPT_ADAPTER *ioc = hd->ioc;
1646 VirtDevice *vdevice = sdev->hostdata;
842 1647
843 if (sdev->channel == MPTSAS_RAID_CHANNEL) 1648 if (vdevice->vtarget->deleted) {
1649 sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
1650 vdevice->vtarget->deleted = 0;
1651 }
1652
1653 /*
1654 * RAID volumes placed beyond the last expected port.
1655 * Ignore sending sas mode pages in that case..
1656 */
1657 if (sdev->channel == MPTSAS_RAID_CHANNEL) {
1658 mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
844 goto out; 1659 goto out;
1660 }
845 1661
846 sas_read_port_mode_page(sdev); 1662 sas_read_port_mode_page(sdev);
847 1663
1664 mptsas_add_device_component_starget(ioc, scsi_target(sdev));
1665
848 out: 1666 out:
849 return mptscsih_slave_configure(sdev); 1667 return mptscsih_slave_configure(sdev);
850} 1668}
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
875 * RAID volumes placed beyond the last expected port. 1693 * RAID volumes placed beyond the last expected port.
876 */ 1694 */
877 if (starget->channel == MPTSAS_RAID_CHANNEL) { 1695 if (starget->channel == MPTSAS_RAID_CHANNEL) {
878 for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 1696 if (!ioc->raid_data.pIocPg2) {
879 if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 1697 kfree(vtarget);
880 channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 1698 return -ENXIO;
1699 }
1700 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1701 if (id == ioc->raid_data.pIocPg2->
1702 RaidVolume[i].VolumeID) {
1703 channel = ioc->raid_data.pIocPg2->
1704 RaidVolume[i].VolumeBus;
1705 }
1706 }
1707 vtarget->raidVolume = 1;
881 goto out; 1708 goto out;
882 } 1709 }
883 1710
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
926 struct sas_rphy *rphy; 1753 struct sas_rphy *rphy;
927 struct mptsas_portinfo *p; 1754 struct mptsas_portinfo *p;
928 int i; 1755 int i;
929 MPT_ADAPTER *ioc = hd->ioc; 1756 MPT_ADAPTER *ioc = hd->ioc;
1757 VirtTarget *vtarget;
930 1758
931 if (!starget->hostdata) 1759 if (!starget->hostdata)
932 return; 1760 return;
933 1761
1762 vtarget = starget->hostdata;
1763
1764 mptsas_del_device_component_by_os(ioc, starget->channel,
1765 starget->id);
1766
1767
934 if (starget->channel == MPTSAS_RAID_CHANNEL) 1768 if (starget->channel == MPTSAS_RAID_CHANNEL)
935 goto out; 1769 goto out;
936 1770
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
940 if (p->phy_info[i].attached.sas_address != 1774 if (p->phy_info[i].attached.sas_address !=
941 rphy->identify.sas_address) 1775 rphy->identify.sas_address)
942 continue; 1776 continue;
1777
1778 starget_printk(KERN_INFO, starget, MYIOC_s_FMT
1779 "delete device: fw_channel %d, fw_id %d, phy %d, "
1780 "sas_addr 0x%llx\n", ioc->name,
1781 p->phy_info[i].attached.channel,
1782 p->phy_info[i].attached.id,
1783 p->phy_info[i].attached.phy_id, (unsigned long long)
1784 p->phy_info[i].attached.sas_address);
1785
943 mptsas_set_starget(&p->phy_info[i], NULL); 1786 mptsas_set_starget(&p->phy_info[i], NULL);
944 goto out;
945 } 1787 }
946 } 1788 }
947 1789
948 out: 1790 out:
1791 vtarget->starget = NULL;
949 kfree(starget->hostdata); 1792 kfree(starget->hostdata);
950 starget->hostdata = NULL; 1793 starget->hostdata = NULL;
951} 1794}
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
1008static int 1851static int
1009mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1852mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1010{ 1853{
1854 MPT_SCSI_HOST *hd;
1855 MPT_ADAPTER *ioc;
1011 VirtDevice *vdevice = SCpnt->device->hostdata; 1856 VirtDevice *vdevice = SCpnt->device->hostdata;
1012 1857
1013 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { 1858 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1016 return 0; 1861 return 0;
1017 } 1862 }
1018 1863
1864 hd = shost_priv(SCpnt->device->host);
1865 ioc = hd->ioc;
1866
1867 if (ioc->sas_discovery_quiesce_io)
1868 return SCSI_MLQUEUE_HOST_BUSY;
1869
1019// scsi_print_command(SCpnt); 1870// scsi_print_command(SCpnt);
1020 1871
1021 return mptscsih_qcmd(SCpnt,done); 1872 return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
1114static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, 1965static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
1115 MPT_FRAME_HDR *reply) 1966 MPT_FRAME_HDR *reply)
1116{ 1967{
1117 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; 1968 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1118 if (reply != NULL) { 1969 if (reply != NULL) {
1119 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; 1970 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
1120 memcpy(ioc->sas_mgmt.reply, reply, 1971 memcpy(ioc->sas_mgmt.reply, reply,
1121 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); 1972 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
1122 } 1973 }
1123 complete(&ioc->sas_mgmt.done); 1974
1124 return 1; 1975 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1976 ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
1977 complete(&ioc->sas_mgmt.done);
1978 return 1;
1979 }
1980 return 0;
1125} 1981}
1126 1982
1127static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) 1983static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1160 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; 2016 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
1161 req->PhyNum = phy->identify.phy_identifier; 2017 req->PhyNum = phy->identify.phy_identifier;
1162 2018
2019 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1163 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2020 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1164 2021
1165 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 2022 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1174 2031
1175 /* a reply frame is expected */ 2032 /* a reply frame is expected */
1176 if ((ioc->sas_mgmt.status & 2033 if ((ioc->sas_mgmt.status &
1177 MPT_IOCTL_STATUS_RF_VALID) == 0) { 2034 MPT_MGMT_STATUS_RF_VALID) == 0) {
1178 error = -ENXIO; 2035 error = -ENXIO;
1179 goto out_unlock; 2036 goto out_unlock;
1180 } 2037 }
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1191 error = 0; 2048 error = 0;
1192 2049
1193 out_unlock: 2050 out_unlock:
2051 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1194 mutex_unlock(&ioc->sas_mgmt.mutex); 2052 mutex_unlock(&ioc->sas_mgmt.mutex);
1195 out: 2053 out:
1196 return error; 2054 return error;
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1304 struct mptsas_portinfo *port_info; 2162 struct mptsas_portinfo *port_info;
1305 2163
1306 mutex_lock(&ioc->sas_topology_mutex); 2164 mutex_lock(&ioc->sas_topology_mutex);
1307 port_info = mptsas_get_hba_portinfo(ioc); 2165 port_info = ioc->hba_port_info;
1308 if (port_info && port_info->phy_info) 2166 if (port_info && port_info->phy_info)
1309 sas_address = 2167 sas_address =
1310 port_info->phy_info[0].phy->identify.sas_address; 2168 port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1319 /* request */ 2177 /* request */
1320 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2178 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 2179 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 2180 MPI_SGE_FLAGS_DIRECTION)
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 2181 << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (blk_rq_bytes(req) - 4); 2182 flagsLength |= (blk_rq_bytes(req) - 4);
1325 2183
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 2184 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 2185 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 2186 if (!dma_addr_out)
1329 goto put_mf; 2187 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 2188 ioc->add_sge(psge, flagsLength, dma_addr_out);
1331 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2189 psge += ioc->SGE_size;
1332 2190
1333 /* response */ 2191 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 2192 flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2193 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
2194 MPI_SGE_FLAGS_IOC_TO_HOST |
2195 MPI_SGE_FLAGS_END_OF_BUFFER;
2196
2197 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
1335 flagsLength |= blk_rq_bytes(rsp) + 4; 2198 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 2199 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 2200 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 2201 if (!dma_addr_in)
1339 goto unmap; 2202 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 2203 ioc->add_sge(psge, flagsLength, dma_addr_in);
1341 2204
2205 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1342 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2206 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1343 2207
1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); 2208 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
@@ -1351,7 +2215,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1351 } 2215 }
1352 mf = NULL; 2216 mf = NULL;
1353 2217
1354 if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { 2218 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
1355 SmpPassthroughReply_t *smprep; 2219 SmpPassthroughReply_t *smprep;
1356 2220
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 2221 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
@@ -1360,7 +2224,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1360 req->resid_len = 0; 2224 req->resid_len = 0;
1361 rsp->resid_len -= smprep->ResponseDataLength; 2225 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 2226 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 2227 printk(MYIOC_s_ERR_FMT
2228 "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 2229 ioc->name, __func__);
1365 ret = -ENXIO; 2230 ret = -ENXIO;
1366 } 2231 }
@@ -1375,6 +2240,7 @@ put_mf:
1375 if (mf) 2240 if (mf)
1376 mpt_free_msg_frame(ioc, mf); 2241 mpt_free_msg_frame(ioc, mf);
1377out_unlock: 2242out_unlock:
2243 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1378 mutex_unlock(&ioc->sas_mgmt.mutex); 2244 mutex_unlock(&ioc->sas_mgmt.mutex);
1379out: 2245out:
1380 return ret; 2246 return ret;
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
1438 2304
1439 port_info->num_phys = buffer->NumPhys; 2305 port_info->num_phys = buffer->NumPhys;
1440 port_info->phy_info = kcalloc(port_info->num_phys, 2306 port_info->phy_info = kcalloc(port_info->num_phys,
1441 sizeof(*port_info->phy_info),GFP_KERNEL); 2307 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1442 if (!port_info->phy_info) { 2308 if (!port_info->phy_info) {
1443 error = -ENOMEM; 2309 error = -ENOMEM;
1444 goto out_free_consistent; 2310 goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1600 __le64 sas_address; 2466 __le64 sas_address;
1601 int error=0; 2467 int error=0;
1602 2468
1603 if (ioc->sas_discovery_runtime &&
1604 mptsas_is_end_device(device_info))
1605 goto out;
1606
1607 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 2469 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
1608 hdr.ExtPageLength = 0; 2470 hdr.ExtPageLength = 0;
1609 hdr.PageNumber = 0; 2471 hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1644 2506
1645 mptsas_print_device_pg0(ioc, buffer); 2507 mptsas_print_device_pg0(ioc, buffer);
1646 2508
2509 memset(device_info, 0, sizeof(struct mptsas_devinfo));
1647 device_info->handle = le16_to_cpu(buffer->DevHandle); 2510 device_info->handle = le16_to_cpu(buffer->DevHandle);
1648 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); 2511 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
1649 device_info->handle_enclosure = 2512 device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1675 SasExpanderPage0_t *buffer; 2538 SasExpanderPage0_t *buffer;
1676 dma_addr_t dma_handle; 2539 dma_addr_t dma_handle;
1677 int i, error; 2540 int i, error;
2541 __le64 sas_address;
1678 2542
2543 memset(port_info, 0, sizeof(struct mptsas_portinfo));
1679 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 2544 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1680 hdr.ExtPageLength = 0; 2545 hdr.ExtPageLength = 0;
1681 hdr.PageNumber = 0; 2546 hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1721 } 2586 }
1722 2587
1723 /* save config data */ 2588 /* save config data */
1724 port_info->num_phys = buffer->NumPhys; 2589 port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
1725 port_info->phy_info = kcalloc(port_info->num_phys, 2590 port_info->phy_info = kcalloc(port_info->num_phys,
1726 sizeof(*port_info->phy_info),GFP_KERNEL); 2591 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1727 if (!port_info->phy_info) { 2592 if (!port_info->phy_info) {
1728 error = -ENOMEM; 2593 error = -ENOMEM;
1729 goto out_free_consistent; 2594 goto out_free_consistent;
1730 } 2595 }
1731 2596
2597 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
1732 for (i = 0; i < port_info->num_phys; i++) { 2598 for (i = 0; i < port_info->num_phys; i++) {
1733 port_info->phy_info[i].portinfo = port_info; 2599 port_info->phy_info[i].portinfo = port_info;
1734 port_info->phy_info[i].handle = 2600 port_info->phy_info[i].handle =
1735 le16_to_cpu(buffer->DevHandle); 2601 le16_to_cpu(buffer->DevHandle);
2602 port_info->phy_info[i].identify.sas_address =
2603 le64_to_cpu(sas_address);
2604 port_info->phy_info[i].identify.handle_parent =
2605 le16_to_cpu(buffer->ParentDevHandle);
1736 } 2606 }
1737 2607
1738 out_free_consistent: 2608 out_free_consistent:
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1752 dma_addr_t dma_handle; 2622 dma_addr_t dma_handle;
1753 int error=0; 2623 int error=0;
1754 2624
1755 if (ioc->sas_discovery_runtime && 2625 hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
1756 mptsas_is_end_device(&phy_info->attached))
1757 goto out;
1758
1759 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1760 hdr.ExtPageLength = 0; 2626 hdr.ExtPageLength = 0;
1761 hdr.PageNumber = 1; 2627 hdr.PageNumber = 1;
1762 hdr.Reserved1 = 0; 2628 hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1791 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2657 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
1792 2658
1793 error = mpt_config(ioc, &cfg); 2659 error = mpt_config(ioc, &cfg);
2660
2661 if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
2662 error = -ENODEV;
2663 goto out;
2664 }
2665
1794 if (error) 2666 if (error)
1795 goto out_free_consistent; 2667 goto out_free_consistent;
1796 2668
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
2010 goto out; 2882 goto out;
2011 } 2883 }
2012 mptsas_set_port(ioc, phy_info, port); 2884 mptsas_set_port(ioc, phy_info, port);
2013 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2885 devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
2014 "sas_port_alloc: port=%p dev=%p port_id=%d\n", 2886 MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
2015 ioc->name, port, dev, port->port_identifier)); 2887 ioc->name, port->port_identifier,
2888 (unsigned long long)phy_info->
2889 attached.sas_address));
2016 } 2890 }
2017 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", 2891 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2018 ioc->name, phy_info->phy_id)); 2892 "sas_port_add_phy: phy_id=%d\n",
2893 ioc->name, phy_info->phy_id));
2019 sas_port_add_phy(port, phy_info->phy); 2894 sas_port_add_phy(port, phy_info->phy);
2020 phy_info->sas_port_add_phy = 0; 2895 phy_info->sas_port_add_phy = 0;
2896 devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
2897 MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
2898 phy_info->phy_id, phy_info->phy));
2021 } 2899 }
2022
2023 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { 2900 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
2024 2901
2025 struct sas_rphy *rphy; 2902 struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
2032 * the adding/removing of devices that occur 2909 * the adding/removing of devices that occur
2033 * after start of day. 2910 * after start of day.
2034 */ 2911 */
2035 if (ioc->sas_discovery_runtime && 2912 if (mptsas_is_end_device(&phy_info->attached) &&
2036 mptsas_is_end_device(&phy_info->attached)) 2913 phy_info->attached.handle_parent) {
2037 goto out; 2914 goto out;
2915 }
2038 2916
2039 mptsas_parse_device_info(&identify, &phy_info->attached); 2917 mptsas_parse_device_info(&identify, &phy_info->attached);
2040 if (scsi_is_host_device(parent)) { 2918 if (scsi_is_host_device(parent)) {
2041 struct mptsas_portinfo *port_info; 2919 struct mptsas_portinfo *port_info;
2042 int i; 2920 int i;
2043 2921
2044 mutex_lock(&ioc->sas_topology_mutex); 2922 port_info = ioc->hba_port_info;
2045 port_info = mptsas_get_hba_portinfo(ioc);
2046 mutex_unlock(&ioc->sas_topology_mutex);
2047 2923
2048 for (i = 0; i < port_info->num_phys; i++) 2924 for (i = 0; i < port_info->num_phys; i++)
2049 if (port_info->phy_info[i].identify.sas_address == 2925 if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2102 struct mptsas_portinfo *port_info, *hba; 2978 struct mptsas_portinfo *port_info, *hba;
2103 int error = -ENOMEM, i; 2979 int error = -ENOMEM, i;
2104 2980
2105 hba = kzalloc(sizeof(*port_info), GFP_KERNEL); 2981 hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2106 if (! hba) 2982 if (! hba)
2107 goto out; 2983 goto out;
2108 2984
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2112 2988
2113 mptsas_sas_io_unit_pg1(ioc); 2989 mptsas_sas_io_unit_pg1(ioc);
2114 mutex_lock(&ioc->sas_topology_mutex); 2990 mutex_lock(&ioc->sas_topology_mutex);
2115 port_info = mptsas_get_hba_portinfo(ioc); 2991 port_info = ioc->hba_port_info;
2116 if (!port_info) { 2992 if (!port_info) {
2117 port_info = hba; 2993 ioc->hba_port_info = port_info = hba;
2994 ioc->hba_port_num_phy = port_info->num_phys;
2118 list_add_tail(&port_info->list, &ioc->sas_topology); 2995 list_add_tail(&port_info->list, &ioc->sas_topology);
2119 } else { 2996 } else {
2120 for (i = 0; i < hba->num_phys; i++) { 2997 for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2130 hba = NULL; 3007 hba = NULL;
2131 } 3008 }
2132 mutex_unlock(&ioc->sas_topology_mutex); 3009 mutex_unlock(&ioc->sas_topology_mutex);
3010#if defined(CPQ_CIM)
3011 ioc->num_ports = port_info->num_phys;
3012#endif
2133 for (i = 0; i < port_info->num_phys; i++) { 3013 for (i = 0; i < port_info->num_phys; i++) {
2134 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 3014 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
2135 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 3015 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
2136 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 3016 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
2137 3017 port_info->phy_info[i].identify.handle =
3018 port_info->phy_info[i].handle;
2138 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, 3019 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
2139 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3020 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2140 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3021 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2141 port_info->phy_info[i].handle); 3022 port_info->phy_info[i].identify.handle);
3023 if (!ioc->hba_port_sas_addr)
3024 ioc->hba_port_sas_addr =
3025 port_info->phy_info[i].identify.sas_address;
2142 port_info->phy_info[i].identify.phy_id = 3026 port_info->phy_info[i].identify.phy_id =
2143 port_info->phy_info[i].phy_id = i; 3027 port_info->phy_info[i].phy_id = i;
2144 if (port_info->phy_info[i].attached.handle) 3028 if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2163 return error; 3047 return error;
2164} 3048}
2165 3049
2166static int 3050static void
2167mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) 3051mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
2168{ 3052{
2169 struct mptsas_portinfo *port_info, *p, *ex; 3053 struct mptsas_portinfo *parent;
2170 struct device *parent; 3054 struct device *parent_dev;
2171 struct sas_rphy *rphy; 3055 struct sas_rphy *rphy;
2172 int error = -ENOMEM, i, j; 3056 int i;
2173 3057 u64 sas_address; /* expander sas address */
2174 ex = kzalloc(sizeof(*port_info), GFP_KERNEL); 3058 u32 handle;
2175 if (!ex) 3059
2176 goto out; 3060 handle = port_info->phy_info[0].handle;
2177 3061 sas_address = port_info->phy_info[0].identify.sas_address;
2178 error = mptsas_sas_expander_pg0(ioc, ex,
2179 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
2180 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
2181 if (error)
2182 goto out_free_port_info;
2183
2184 *handle = ex->phy_info[0].handle;
2185
2186 mutex_lock(&ioc->sas_topology_mutex);
2187 port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
2188 if (!port_info) {
2189 port_info = ex;
2190 list_add_tail(&port_info->list, &ioc->sas_topology);
2191 } else {
2192 for (i = 0; i < ex->num_phys; i++) {
2193 port_info->phy_info[i].handle =
2194 ex->phy_info[i].handle;
2195 port_info->phy_info[i].port_id =
2196 ex->phy_info[i].port_id;
2197 }
2198 kfree(ex->phy_info);
2199 kfree(ex);
2200 ex = NULL;
2201 }
2202 mutex_unlock(&ioc->sas_topology_mutex);
2203
2204 for (i = 0; i < port_info->num_phys; i++) { 3062 for (i = 0; i < port_info->num_phys; i++) {
2205 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], 3063 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
2206 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << 3064 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
2207 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); 3065 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
2208 3066
2209 if (port_info->phy_info[i].identify.handle) { 3067 mptsas_sas_device_pg0(ioc,
2210 mptsas_sas_device_pg0(ioc, 3068 &port_info->phy_info[i].identify,
2211 &port_info->phy_info[i].identify, 3069 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2212 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3070 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2213 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3071 port_info->phy_info[i].identify.handle);
2214 port_info->phy_info[i].identify.handle); 3072 port_info->phy_info[i].identify.phy_id =
2215 port_info->phy_info[i].identify.phy_id = 3073 port_info->phy_info[i].phy_id;
2216 port_info->phy_info[i].phy_id;
2217 }
2218 3074
2219 if (port_info->phy_info[i].attached.handle) { 3075 if (port_info->phy_info[i].attached.handle) {
2220 mptsas_sas_device_pg0(ioc, 3076 mptsas_sas_device_pg0(ioc,
2221 &port_info->phy_info[i].attached, 3077 &port_info->phy_info[i].attached,
2222 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3078 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2223 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3079 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2224 port_info->phy_info[i].attached.handle); 3080 port_info->phy_info[i].attached.handle);
2225 port_info->phy_info[i].attached.phy_id = 3081 port_info->phy_info[i].attached.phy_id =
2226 port_info->phy_info[i].phy_id; 3082 port_info->phy_info[i].phy_id;
2227 } 3083 }
2228 } 3084 }
2229 3085
2230 parent = &ioc->sh->shost_gendev; 3086 mutex_lock(&ioc->sas_topology_mutex);
2231 for (i = 0; i < port_info->num_phys; i++) { 3087 parent = mptsas_find_portinfo_by_handle(ioc,
2232 mutex_lock(&ioc->sas_topology_mutex); 3088 port_info->phy_info[0].identify.handle_parent);
2233 list_for_each_entry(p, &ioc->sas_topology, list) { 3089 if (!parent) {
2234 for (j = 0; j < p->num_phys; j++) {
2235 if (port_info->phy_info[i].identify.handle !=
2236 p->phy_info[j].attached.handle)
2237 continue;
2238 rphy = mptsas_get_rphy(&p->phy_info[j]);
2239 parent = &rphy->dev;
2240 }
2241 }
2242 mutex_unlock(&ioc->sas_topology_mutex); 3090 mutex_unlock(&ioc->sas_topology_mutex);
3091 return;
2243 } 3092 }
3093 for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
3094 i++) {
3095 if (parent->phy_info[i].attached.sas_address == sas_address) {
3096 rphy = mptsas_get_rphy(&parent->phy_info[i]);
3097 parent_dev = &rphy->dev;
3098 }
3099 }
3100 mutex_unlock(&ioc->sas_topology_mutex);
2244 3101
2245 mptsas_setup_wide_ports(ioc, port_info); 3102 mptsas_setup_wide_ports(ioc, port_info);
2246
2247 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++) 3103 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
2248 mptsas_probe_one_phy(parent, &port_info->phy_info[i], 3104 mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
2249 ioc->sas_index, 0); 3105 ioc->sas_index, 0);
3106}
2250 3107
2251 return 0; 3108static void
3109mptsas_expander_event_add(MPT_ADAPTER *ioc,
3110 MpiEventDataSasExpanderStatusChange_t *expander_data)
3111{
3112 struct mptsas_portinfo *port_info;
3113 int i;
3114 __le64 sas_address;
2252 3115
2253 out_free_port_info: 3116 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2254 if (ex) { 3117 if (!port_info)
2255 kfree(ex->phy_info); 3118 BUG();
2256 kfree(ex); 3119 port_info->num_phys = (expander_data->NumPhys) ?
3120 expander_data->NumPhys : 1;
3121 port_info->phy_info = kcalloc(port_info->num_phys,
3122 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
3123 if (!port_info->phy_info)
3124 BUG();
3125 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3126 for (i = 0; i < port_info->num_phys; i++) {
3127 port_info->phy_info[i].portinfo = port_info;
3128 port_info->phy_info[i].handle =
3129 le16_to_cpu(expander_data->DevHandle);
3130 port_info->phy_info[i].identify.sas_address =
3131 le64_to_cpu(sas_address);
3132 port_info->phy_info[i].identify.handle_parent =
3133 le16_to_cpu(expander_data->ParentDevHandle);
3134 }
3135
3136 mutex_lock(&ioc->sas_topology_mutex);
3137 list_add_tail(&port_info->list, &ioc->sas_topology);
3138 mutex_unlock(&ioc->sas_topology_mutex);
3139
3140 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3141 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3142 (unsigned long long)sas_address);
3143
3144 mptsas_expander_refresh(ioc, port_info);
3145}
3146
3147/**
3148 * mptsas_delete_expander_siblings - remove siblings attached to expander
3149 * @ioc: Pointer to MPT_ADAPTER structure
3150 * @parent: the parent port_info object
3151 * @expander: the expander port_info object
3152 **/
3153static void
3154mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
3155 *parent, struct mptsas_portinfo *expander)
3156{
3157 struct mptsas_phyinfo *phy_info;
3158 struct mptsas_portinfo *port_info;
3159 struct sas_rphy *rphy;
3160 int i;
3161
3162 phy_info = expander->phy_info;
3163 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3164 rphy = mptsas_get_rphy(phy_info);
3165 if (!rphy)
3166 continue;
3167 if (rphy->identify.device_type == SAS_END_DEVICE)
3168 mptsas_del_end_device(ioc, phy_info);
3169 }
3170
3171 phy_info = expander->phy_info;
3172 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3173 rphy = mptsas_get_rphy(phy_info);
3174 if (!rphy)
3175 continue;
3176 if (rphy->identify.device_type ==
3177 MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
3178 rphy->identify.device_type ==
3179 MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
3180 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3181 rphy->identify.sas_address);
3182 if (!port_info)
3183 continue;
3184 if (port_info == parent) /* backlink rphy */
3185 continue;
3186 /*
3187 Delete this expander even if the expdevpage is exists
3188 because the parent expander is already deleted
3189 */
3190 mptsas_expander_delete(ioc, port_info, 1);
3191 }
3192 }
3193}
3194
3195
3196/**
3197 * mptsas_expander_delete - remove this expander
3198 * @ioc: Pointer to MPT_ADAPTER structure
3199 * @port_info: expander port_info struct
3200 * @force: Flag to forcefully delete the expander
3201 *
3202 **/
3203
3204static void mptsas_expander_delete(MPT_ADAPTER *ioc,
3205 struct mptsas_portinfo *port_info, u8 force)
3206{
3207
3208 struct mptsas_portinfo *parent;
3209 int i;
3210 u64 expander_sas_address;
3211 struct mptsas_phyinfo *phy_info;
3212 struct mptsas_portinfo buffer;
3213 struct mptsas_portinfo_details *port_details;
3214 struct sas_port *port;
3215
3216 if (!port_info)
3217 return;
3218
3219 /* see if expander is still there before deleting */
3220 mptsas_sas_expander_pg0(ioc, &buffer,
3221 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3222 MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
3223 port_info->phy_info[0].identify.handle);
3224
3225 if (buffer.num_phys) {
3226 kfree(buffer.phy_info);
3227 if (!force)
3228 return;
3229 }
3230
3231
3232 /*
3233 * Obtain the port_info instance to the parent port
3234 */
3235 port_details = NULL;
3236 expander_sas_address =
3237 port_info->phy_info[0].identify.sas_address;
3238 parent = mptsas_find_portinfo_by_handle(ioc,
3239 port_info->phy_info[0].identify.handle_parent);
3240 mptsas_delete_expander_siblings(ioc, parent, port_info);
3241 if (!parent)
3242 goto out;
3243
3244 /*
3245 * Delete rphys in the parent that point
3246 * to this expander.
3247 */
3248 phy_info = parent->phy_info;
3249 port = NULL;
3250 for (i = 0; i < parent->num_phys; i++, phy_info++) {
3251 if (!phy_info->phy)
3252 continue;
3253 if (phy_info->attached.sas_address !=
3254 expander_sas_address)
3255 continue;
3256 if (!port) {
3257 port = mptsas_get_port(phy_info);
3258 port_details = phy_info->port_details;
3259 }
3260 dev_printk(KERN_DEBUG, &phy_info->phy->dev,
3261 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
3262 phy_info->phy_id, phy_info->phy);
3263 sas_port_delete_phy(port, phy_info->phy);
3264 }
3265 if (port) {
3266 dev_printk(KERN_DEBUG, &port->dev,
3267 MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
3268 ioc->name, port->port_identifier,
3269 (unsigned long long)expander_sas_address);
3270 sas_port_delete(port);
3271 mptsas_port_delete(ioc, port_details);
2257 } 3272 }
2258 out: 3273 out:
2259 return error; 3274
3275 printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
3276 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3277 (unsigned long long)expander_sas_address);
3278
3279 /*
3280 * free link
3281 */
3282 list_del(&port_info->list);
3283 kfree(port_info->phy_info);
3284 kfree(port_info);
2260} 3285}
2261 3286
2262/* 3287
2263 * mptsas_delete_expander_phys 3288/**
3289 * mptsas_send_expander_event - expanders events
3290 * @ioc: Pointer to MPT_ADAPTER structure
3291 * @expander_data: event data
2264 * 3292 *
2265 * 3293 *
2266 * This will traverse topology, and remove expanders 3294 * This function handles adding, removing, and refreshing
2267 * that are no longer present 3295 * device handles within the expander objects.
2268 */ 3296 */
2269static void 3297static void
2270mptsas_delete_expander_phys(MPT_ADAPTER *ioc) 3298mptsas_send_expander_event(struct fw_event_work *fw_event)
2271{ 3299{
2272 struct mptsas_portinfo buffer; 3300 MPT_ADAPTER *ioc;
2273 struct mptsas_portinfo *port_info, *n, *parent; 3301 MpiEventDataSasExpanderStatusChange_t *expander_data;
2274 struct mptsas_phyinfo *phy_info; 3302 struct mptsas_portinfo *port_info;
2275 struct sas_port * port; 3303 __le64 sas_address;
2276 int i; 3304 int i;
2277 u64 expander_sas_address;
2278 3305
3306 ioc = fw_event->ioc;
3307 expander_data = (MpiEventDataSasExpanderStatusChange_t *)
3308 fw_event->event_data;
3309 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3310 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3311
3312 if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
3313 if (port_info) {
3314 for (i = 0; i < port_info->num_phys; i++) {
3315 port_info->phy_info[i].portinfo = port_info;
3316 port_info->phy_info[i].handle =
3317 le16_to_cpu(expander_data->DevHandle);
3318 port_info->phy_info[i].identify.sas_address =
3319 le64_to_cpu(sas_address);
3320 port_info->phy_info[i].identify.handle_parent =
3321 le16_to_cpu(expander_data->ParentDevHandle);
3322 }
3323 mptsas_expander_refresh(ioc, port_info);
3324 } else if (!port_info && expander_data->NumPhys)
3325 mptsas_expander_event_add(ioc, expander_data);
3326 } else if (expander_data->ReasonCode ==
3327 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
3328 mptsas_expander_delete(ioc, port_info, 0);
3329
3330 mptsas_free_fw_event(ioc, fw_event);
3331}
3332
3333
3334/**
3335 * mptsas_expander_add -
3336 * @ioc: Pointer to MPT_ADAPTER structure
3337 * @handle:
3338 *
3339 */
3340struct mptsas_portinfo *
3341mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
3342{
3343 struct mptsas_portinfo buffer, *port_info;
3344 int i;
3345
3346 if ((mptsas_sas_expander_pg0(ioc, &buffer,
3347 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3348 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
3349 return NULL;
3350
3351 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
3352 if (!port_info) {
3353 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3354 "%s: exit at line=%d\n", ioc->name,
3355 __func__, __LINE__));
3356 return NULL;
3357 }
3358 port_info->num_phys = buffer.num_phys;
3359 port_info->phy_info = buffer.phy_info;
3360 for (i = 0; i < port_info->num_phys; i++)
3361 port_info->phy_info[i].portinfo = port_info;
2279 mutex_lock(&ioc->sas_topology_mutex); 3362 mutex_lock(&ioc->sas_topology_mutex);
2280 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { 3363 list_add_tail(&port_info->list, &ioc->sas_topology);
3364 mutex_unlock(&ioc->sas_topology_mutex);
3365 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3366 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3367 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3368 mptsas_expander_refresh(ioc, port_info);
3369 return port_info;
3370}
2281 3371
2282 if (!(port_info->phy_info[0].identify.device_info & 3372static void
2283 MPI_SAS_DEVICE_INFO_SMP_TARGET)) 3373mptsas_send_link_status_event(struct fw_event_work *fw_event)
3374{
3375 MPT_ADAPTER *ioc;
3376 MpiEventDataSasPhyLinkStatus_t *link_data;
3377 struct mptsas_portinfo *port_info;
3378 struct mptsas_phyinfo *phy_info = NULL;
3379 __le64 sas_address;
3380 u8 phy_num;
3381 u8 link_rate;
3382
3383 ioc = fw_event->ioc;
3384 link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
3385
3386 memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
3387 sas_address = le64_to_cpu(sas_address);
3388 link_rate = link_data->LinkRates >> 4;
3389 phy_num = link_data->PhyNum;
3390
3391 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3392 if (port_info) {
3393 phy_info = &port_info->phy_info[phy_num];
3394 if (phy_info)
3395 phy_info->negotiated_link_rate = link_rate;
3396 }
3397
3398 if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
3399 link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
3400
3401 if (!port_info) {
3402 if (ioc->old_sas_discovery_protocal) {
3403 port_info = mptsas_expander_add(ioc,
3404 le16_to_cpu(link_data->DevHandle));
3405 if (port_info)
3406 goto out;
3407 }
3408 goto out;
3409 }
3410
3411 if (port_info == ioc->hba_port_info)
3412 mptsas_probe_hba_phys(ioc);
3413 else
3414 mptsas_expander_refresh(ioc, port_info);
3415 } else if (phy_info && phy_info->phy) {
3416 if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
3417 phy_info->phy->negotiated_linkrate =
3418 SAS_PHY_DISABLED;
3419 else if (link_rate ==
3420 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
3421 phy_info->phy->negotiated_linkrate =
3422 SAS_LINK_RATE_FAILED;
3423 else
3424 phy_info->phy->negotiated_linkrate =
3425 SAS_LINK_RATE_UNKNOWN;
3426 }
3427 out:
3428 mptsas_free_fw_event(ioc, fw_event);
3429}
3430
3431static void
3432mptsas_not_responding_devices(MPT_ADAPTER *ioc)
3433{
3434 struct mptsas_portinfo buffer, *port_info;
3435 struct mptsas_device_info *sas_info;
3436 struct mptsas_devinfo sas_device;
3437 u32 handle;
3438 VirtTarget *vtarget = NULL;
3439 struct mptsas_phyinfo *phy_info;
3440 u8 found_expander;
3441 int retval, retry_count;
3442 unsigned long flags;
3443
3444 mpt_findImVolumes(ioc);
3445
3446 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3447 if (ioc->ioc_reset_in_progress) {
3448 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3449 "%s: exiting due to a parallel reset \n", ioc->name,
3450 __func__));
3451 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3452 return;
3453 }
3454 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3455
3456 /* devices, logical volumes */
3457 mutex_lock(&ioc->sas_device_info_mutex);
3458 redo_device_scan:
3459 list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
3460 if (sas_info->is_cached)
2284 continue; 3461 continue;
3462 if (!sas_info->is_logical_volume) {
3463 sas_device.handle = 0;
3464 retry_count = 0;
3465retry_page:
3466 retval = mptsas_sas_device_pg0(ioc, &sas_device,
3467 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
3468 << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3469 (sas_info->fw.channel << 8) +
3470 sas_info->fw.id);
3471
3472 if (sas_device.handle)
3473 continue;
3474 if (retval == -EBUSY) {
3475 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3476 if (ioc->ioc_reset_in_progress) {
3477 dfailprintk(ioc,
3478 printk(MYIOC_s_DEBUG_FMT
3479 "%s: exiting due to reset\n",
3480 ioc->name, __func__));
3481 spin_unlock_irqrestore
3482 (&ioc->taskmgmt_lock, flags);
3483 mutex_unlock(&ioc->
3484 sas_device_info_mutex);
3485 return;
3486 }
3487 spin_unlock_irqrestore(&ioc->taskmgmt_lock,
3488 flags);
3489 }
2285 3490
2286 if (mptsas_sas_expander_pg0(ioc, &buffer, 3491 if (retval && (retval != -ENODEV)) {
2287 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << 3492 if (retry_count < 10) {
2288 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), 3493 retry_count++;
2289 port_info->phy_info[0].handle)) { 3494 goto retry_page;
3495 } else {
3496 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3497 "%s: Config page retry exceeded retry "
3498 "count deleting device 0x%llx\n",
3499 ioc->name, __func__,
3500 sas_info->sas_address));
3501 }
3502 }
2290 3503
2291 /* 3504 /* delete device */
2292 * Obtain the port_info instance to the parent port 3505 vtarget = mptsas_find_vtarget(ioc,
2293 */ 3506 sas_info->fw.channel, sas_info->fw.id);
2294 parent = mptsas_find_portinfo_by_handle(ioc,
2295 port_info->phy_info[0].identify.handle_parent);
2296 3507
2297 if (!parent) 3508 if (vtarget)
2298 goto next_port; 3509 vtarget->deleted = 1;
2299 3510
2300 expander_sas_address = 3511 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2301 port_info->phy_info[0].identify.sas_address; 3512 sas_info->sas_address);
2302 3513
2303 /* 3514 if (phy_info) {
2304 * Delete rphys in the parent that point 3515 mptsas_del_end_device(ioc, phy_info);
2305 * to this expander. The transport layer will 3516 goto redo_device_scan;
2306 * cleanup all the children.
2307 */
2308 phy_info = parent->phy_info;
2309 for (i = 0; i < parent->num_phys; i++, phy_info++) {
2310 port = mptsas_get_port(phy_info);
2311 if (!port)
2312 continue;
2313 if (phy_info->attached.sas_address !=
2314 expander_sas_address)
2315 continue;
2316 dsaswideprintk(ioc,
2317 dev_printk(KERN_DEBUG, &port->dev,
2318 MYIOC_s_FMT "delete port (%d)\n", ioc->name,
2319 port->port_identifier));
2320 sas_port_delete(port);
2321 mptsas_port_delete(ioc, phy_info->port_details);
2322 } 3517 }
2323 next_port: 3518 } else
3519 mptsas_volume_delete(ioc, sas_info->fw.id);
3520 }
3521 mutex_lock(&ioc->sas_device_info_mutex);
2324 3522
2325 phy_info = port_info->phy_info; 3523 /* expanders */
2326 for (i = 0; i < port_info->num_phys; i++, phy_info++) 3524 mutex_lock(&ioc->sas_topology_mutex);
2327 mptsas_port_delete(ioc, phy_info->port_details); 3525 redo_expander_scan:
3526 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2328 3527
2329 list_del(&port_info->list); 3528 if (port_info->phy_info &&
2330 kfree(port_info->phy_info); 3529 (!(port_info->phy_info[0].identify.device_info &
2331 kfree(port_info); 3530 MPI_SAS_DEVICE_INFO_SMP_TARGET)))
3531 continue;
3532 found_expander = 0;
3533 handle = 0xFFFF;
3534 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3535 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3536 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
3537 !found_expander) {
3538
3539 handle = buffer.phy_info[0].handle;
3540 if (buffer.phy_info[0].identify.sas_address ==
3541 port_info->phy_info[0].identify.sas_address) {
3542 found_expander = 1;
3543 }
3544 kfree(buffer.phy_info);
3545 }
3546
3547 if (!found_expander) {
3548 mptsas_expander_delete(ioc, port_info, 0);
3549 goto redo_expander_scan;
2332 } 3550 }
2333 /*
2334 * Free this memory allocated from inside
2335 * mptsas_sas_expander_pg0
2336 */
2337 kfree(buffer.phy_info);
2338 } 3551 }
2339 mutex_unlock(&ioc->sas_topology_mutex); 3552 mutex_lock(&ioc->sas_topology_mutex);
3553}
3554
3555/**
3556 * mptsas_probe_expanders - adding expanders
3557 * @ioc: Pointer to MPT_ADAPTER structure
3558 *
3559 **/
3560static void
3561mptsas_probe_expanders(MPT_ADAPTER *ioc)
3562{
3563 struct mptsas_portinfo buffer, *port_info;
3564 u32 handle;
3565 int i;
3566
3567 handle = 0xFFFF;
3568 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3569 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3570 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
3571
3572 handle = buffer.phy_info[0].handle;
3573 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3574 buffer.phy_info[0].identify.sas_address);
3575
3576 if (port_info) {
3577 /* refreshing handles */
3578 for (i = 0; i < buffer.num_phys; i++) {
3579 port_info->phy_info[i].handle = handle;
3580 port_info->phy_info[i].identify.handle_parent =
3581 buffer.phy_info[0].identify.handle_parent;
3582 }
3583 mptsas_expander_refresh(ioc, port_info);
3584 kfree(buffer.phy_info);
3585 continue;
3586 }
3587
3588 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
3589 if (!port_info) {
3590 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3591 "%s: exit at line=%d\n", ioc->name,
3592 __func__, __LINE__));
3593 return;
3594 }
3595 port_info->num_phys = buffer.num_phys;
3596 port_info->phy_info = buffer.phy_info;
3597 for (i = 0; i < port_info->num_phys; i++)
3598 port_info->phy_info[i].portinfo = port_info;
3599 mutex_lock(&ioc->sas_topology_mutex);
3600 list_add_tail(&port_info->list, &ioc->sas_topology);
3601 mutex_unlock(&ioc->sas_topology_mutex);
3602 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3603 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3604 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3605 mptsas_expander_refresh(ioc, port_info);
3606 }
2340} 3607}
2341 3608
2342/* 3609static void
2343 * Start of day discovery 3610mptsas_probe_devices(MPT_ADAPTER *ioc)
2344 */ 3611{
3612 u16 handle;
3613 struct mptsas_devinfo sas_device;
3614 struct mptsas_phyinfo *phy_info;
3615
3616 handle = 0xFFFF;
3617 while (!(mptsas_sas_device_pg0(ioc, &sas_device,
3618 MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
3619
3620 handle = sas_device.handle;
3621
3622 if ((sas_device.device_info &
3623 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
3624 MPI_SAS_DEVICE_INFO_STP_TARGET |
3625 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
3626 continue;
3627
3628 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
3629 if (!phy_info)
3630 continue;
3631
3632 if (mptsas_get_rphy(phy_info))
3633 continue;
3634
3635 mptsas_add_end_device(ioc, phy_info);
3636 }
3637}
3638
3639/**
3640 * mptsas_scan_sas_topology -
3641 * @ioc: Pointer to MPT_ADAPTER structure
3642 * @sas_address:
3643 *
3644 **/
2345static void 3645static void
2346mptsas_scan_sas_topology(MPT_ADAPTER *ioc) 3646mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
2347{ 3647{
2348 u32 handle = 0xFFFF; 3648 struct scsi_device *sdev;
2349 int i; 3649 int i;
2350 3650
2351 mutex_lock(&ioc->sas_discovery_mutex);
2352 mptsas_probe_hba_phys(ioc); 3651 mptsas_probe_hba_phys(ioc);
2353 while (!mptsas_probe_expander_phys(ioc, &handle)) 3652 mptsas_probe_expanders(ioc);
2354 ; 3653 mptsas_probe_devices(ioc);
3654
2355 /* 3655 /*
2356 Reporting RAID volumes. 3656 Reporting RAID volumes.
2357 */ 3657 */
2358 if (!ioc->ir_firmware) 3658 if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
2359 goto out; 3659 !ioc->raid_data.pIocPg2->NumActiveVolumes)
2360 if (!ioc->raid_data.pIocPg2) 3660 return;
2361 goto out;
2362 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
2363 goto out;
2364 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 3661 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
3662 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
3663 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
3664 if (sdev) {
3665 scsi_device_put(sdev);
3666 continue;
3667 }
3668 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
3669 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
3670 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
2365 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, 3671 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
2366 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 3672 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
2367 } 3673 }
2368 out:
2369 mutex_unlock(&ioc->sas_discovery_mutex);
2370} 3674}
2371 3675
2372/* 3676
2373 * Work queue thread to handle Runtime discovery
2374 * Mere purpose is the hot add/delete of expanders
2375 *(Mutex UNLOCKED)
2376 */
2377static void 3677static void
2378__mptsas_discovery_work(MPT_ADAPTER *ioc) 3678mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
2379{ 3679{
2380 u32 handle = 0xFFFF; 3680 MPT_ADAPTER *ioc;
3681 EventDataQueueFull_t *qfull_data;
3682 struct mptsas_device_info *sas_info;
3683 struct scsi_device *sdev;
3684 int depth;
3685 int id = -1;
3686 int channel = -1;
3687 int fw_id, fw_channel;
3688 u16 current_depth;
3689
3690
3691 ioc = fw_event->ioc;
3692 qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
3693 fw_id = qfull_data->TargetID;
3694 fw_channel = qfull_data->Bus;
3695 current_depth = le16_to_cpu(qfull_data->CurrentDepth);
3696
3697 /* if hidden raid component, look for the volume id */
3698 mutex_lock(&ioc->sas_device_info_mutex);
3699 if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
3700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3701 list) {
3702 if (sas_info->is_cached ||
3703 sas_info->is_logical_volume)
3704 continue;
3705 if (sas_info->is_hidden_raid_component &&
3706 (sas_info->fw.channel == fw_channel &&
3707 sas_info->fw.id == fw_id)) {
3708 id = sas_info->volume_id;
3709 channel = MPTSAS_RAID_CHANNEL;
3710 goto out;
3711 }
3712 }
3713 } else {
3714 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3715 list) {
3716 if (sas_info->is_cached ||
3717 sas_info->is_hidden_raid_component ||
3718 sas_info->is_logical_volume)
3719 continue;
3720 if (sas_info->fw.channel == fw_channel &&
3721 sas_info->fw.id == fw_id) {
3722 id = sas_info->os.id;
3723 channel = sas_info->os.channel;
3724 goto out;
3725 }
3726 }
2381 3727
2382 ioc->sas_discovery_runtime=1; 3728 }
2383 mptsas_delete_expander_phys(ioc);
2384 mptsas_probe_hba_phys(ioc);
2385 while (!mptsas_probe_expander_phys(ioc, &handle))
2386 ;
2387 ioc->sas_discovery_runtime=0;
2388}
2389 3729
2390/* 3730 out:
2391 * Work queue thread to handle Runtime discovery 3731 mutex_unlock(&ioc->sas_device_info_mutex);
2392 * Mere purpose is the hot add/delete of expanders 3732
2393 *(Mutex LOCKED) 3733 if (id != -1) {
2394 */ 3734 shost_for_each_device(sdev, ioc->sh) {
2395static void 3735 if (sdev->id == id && sdev->channel == channel) {
2396mptsas_discovery_work(struct work_struct *work) 3736 if (current_depth > sdev->queue_depth) {
2397{ 3737 sdev_printk(KERN_INFO, sdev,
2398 struct mptsas_discovery_event *ev = 3738 "strange observation, the queue "
2399 container_of(work, struct mptsas_discovery_event, work); 3739 "depth is (%d) meanwhile fw queue "
2400 MPT_ADAPTER *ioc = ev->ioc; 3740 "depth (%d)\n", sdev->queue_depth,
3741 current_depth);
3742 continue;
3743 }
3744 depth = scsi_track_queue_full(sdev,
3745 current_depth - 1);
3746 if (depth > 0)
3747 sdev_printk(KERN_INFO, sdev,
3748 "Queue depth reduced to (%d)\n",
3749 depth);
3750 else if (depth < 0)
3751 sdev_printk(KERN_INFO, sdev,
3752 "Tagged Command Queueing is being "
3753 "disabled\n");
3754 else if (depth == 0)
3755 sdev_printk(KERN_INFO, sdev,
3756 "Queue depth not changed yet\n");
3757 }
3758 }
3759 }
2401 3760
2402 mutex_lock(&ioc->sas_discovery_mutex); 3761 mptsas_free_fw_event(ioc, fw_event);
2403 __mptsas_discovery_work(ioc);
2404 mutex_unlock(&ioc->sas_discovery_mutex);
2405 kfree(ev);
2406} 3762}
2407 3763
3764
2408static struct mptsas_phyinfo * 3765static struct mptsas_phyinfo *
2409mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) 3766mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2410{ 3767{
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2429 return phy_info; 3786 return phy_info;
2430} 3787}
2431 3788
3789/**
3790 * mptsas_find_phyinfo_by_phys_disk_num -
3791 * @ioc: Pointer to MPT_ADAPTER structure
3792 * @phys_disk_num:
3793 * @channel:
3794 * @id:
3795 *
3796 **/
2432static struct mptsas_phyinfo * 3797static struct mptsas_phyinfo *
2433mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id) 3798mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
3799 u8 channel, u8 id)
2434{ 3800{
2435 struct mptsas_portinfo *port_info;
2436 struct mptsas_phyinfo *phy_info = NULL; 3801 struct mptsas_phyinfo *phy_info = NULL;
3802 struct mptsas_portinfo *port_info;
3803 RaidPhysDiskPage1_t *phys_disk = NULL;
3804 int num_paths;
3805 u64 sas_address = 0;
2437 int i; 3806 int i;
2438 3807
2439 mutex_lock(&ioc->sas_topology_mutex); 3808 phy_info = NULL;
2440 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3809 if (!ioc->raid_data.pIocPg3)
2441 for (i = 0; i < port_info->num_phys; i++) { 3810 return NULL;
2442 if (!mptsas_is_end_device( 3811 /* dual port support */
2443 &port_info->phy_info[i].attached)) 3812 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
2444 continue; 3813 if (!num_paths)
2445 if (port_info->phy_info[i].attached.id != id) 3814 goto out;
2446 continue; 3815 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2447 if (port_info->phy_info[i].attached.channel != channel) 3816 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2448 continue; 3817 if (!phys_disk)
2449 phy_info = &port_info->phy_info[i]; 3818 goto out;
2450 break; 3819 mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
3820 for (i = 0; i < num_paths; i++) {
3821 if ((phys_disk->Path[i].Flags & 1) != 0)
3822 /* entry no longer valid */
3823 continue;
3824 if ((id == phys_disk->Path[i].PhysDiskID) &&
3825 (channel == phys_disk->Path[i].PhysDiskBus)) {
3826 memcpy(&sas_address, &phys_disk->Path[i].WWID,
3827 sizeof(u64));
3828 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
3829 sas_address);
3830 goto out;
2451 } 3831 }
2452 } 3832 }
2453 mutex_unlock(&ioc->sas_topology_mutex);
2454 return phy_info;
2455}
2456 3833
2457static struct mptsas_phyinfo * 3834 out:
2458mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 3835 kfree(phys_disk);
2459{ 3836 if (phy_info)
2460 struct mptsas_portinfo *port_info; 3837 return phy_info;
2461 struct mptsas_phyinfo *phy_info = NULL;
2462 int i;
2463 3838
3839 /*
3840 * Extra code to handle RAID0 case, where the sas_address is not updated
3841 * in phys_disk_page_1 when hotswapped
3842 */
2464 mutex_lock(&ioc->sas_topology_mutex); 3843 mutex_lock(&ioc->sas_topology_mutex);
2465 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3844 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2466 for (i = 0; i < port_info->num_phys; i++) { 3845 for (i = 0; i < port_info->num_phys && !phy_info; i++) {
2467 if (!mptsas_is_end_device( 3846 if (!mptsas_is_end_device(
2468 &port_info->phy_info[i].attached)) 3847 &port_info->phy_info[i].attached))
2469 continue; 3848 continue;
2470 if (port_info->phy_info[i].attached.phys_disk_num == ~0) 3849 if (port_info->phy_info[i].attached.phys_disk_num == ~0)
2471 continue; 3850 continue;
2472 if (port_info->phy_info[i].attached.phys_disk_num != id) 3851 if ((port_info->phy_info[i].attached.phys_disk_num ==
2473 continue; 3852 phys_disk_num) &&
2474 if (port_info->phy_info[i].attached.channel != channel) 3853 (port_info->phy_info[i].attached.id == id) &&
2475 continue; 3854 (port_info->phy_info[i].attached.channel ==
2476 phy_info = &port_info->phy_info[i]; 3855 channel))
2477 break; 3856 phy_info = &port_info->phy_info[i];
2478 } 3857 }
2479 } 3858 }
2480 mutex_unlock(&ioc->sas_topology_mutex); 3859 mutex_unlock(&ioc->sas_topology_mutex);
2481 return phy_info; 3860 return phy_info;
2482} 3861}
2483 3862
2484/*
2485 * Work queue thread to clear the persitency table
2486 */
2487static void
2488mptsas_persist_clear_table(struct work_struct *work)
2489{
2490 MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
2491
2492 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2493}
2494
2495static void 3863static void
2496mptsas_reprobe_lun(struct scsi_device *sdev, void *data) 3864mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
2497{ 3865{
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2517 pRaidVolumePage0_t buffer = NULL; 3885 pRaidVolumePage0_t buffer = NULL;
2518 RaidPhysDiskPage0_t phys_disk; 3886 RaidPhysDiskPage0_t phys_disk;
2519 int i; 3887 int i;
2520 struct mptsas_hotplug_event *ev; 3888 struct mptsas_phyinfo *phy_info;
3889 struct mptsas_devinfo sas_device;
2521 3890
2522 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 3891 memset(&cfg, 0 , sizeof(CONFIGPARMS));
2523 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 3892 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2557 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) 3926 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
2558 continue; 3927 continue;
2559 3928
2560 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3929 if (mptsas_sas_device_pg0(ioc, &sas_device,
2561 if (!ev) { 3930 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2562 printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name); 3931 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2563 goto out; 3932 (phys_disk.PhysDiskBus << 8) +
2564 } 3933 phys_disk.PhysDiskID))
3934 continue;
2565 3935
2566 INIT_WORK(&ev->work, mptsas_hotplug_work); 3936 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2567 ev->ioc = ioc; 3937 sas_device.sas_address);
2568 ev->id = phys_disk.PhysDiskID; 3938 mptsas_add_end_device(ioc, phy_info);
2569 ev->channel = phys_disk.PhysDiskBus;
2570 ev->phys_disk_num_valid = 1;
2571 ev->phys_disk_num = phys_disk.PhysDiskNum;
2572 ev->event_type = MPTSAS_ADD_DEVICE;
2573 schedule_work(&ev->work);
2574 } 3939 }
2575 3940
2576 out: 3941 out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2582 * Work queue thread to handle SAS hotplug events 3947 * Work queue thread to handle SAS hotplug events
2583 */ 3948 */
2584static void 3949static void
2585mptsas_hotplug_work(struct work_struct *work) 3950mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
3951 struct mptsas_hotplug_event *hot_plug_info)
2586{ 3952{
2587 struct mptsas_hotplug_event *ev =
2588 container_of(work, struct mptsas_hotplug_event, work);
2589
2590 MPT_ADAPTER *ioc = ev->ioc;
2591 struct mptsas_phyinfo *phy_info; 3953 struct mptsas_phyinfo *phy_info;
2592 struct sas_rphy *rphy;
2593 struct sas_port *port;
2594 struct scsi_device *sdev;
2595 struct scsi_target * starget; 3954 struct scsi_target * starget;
2596 struct sas_identify identify;
2597 char *ds = NULL;
2598 struct mptsas_devinfo sas_device; 3955 struct mptsas_devinfo sas_device;
2599 VirtTarget *vtarget; 3956 VirtTarget *vtarget;
2600 VirtDevice *vdevice; 3957 int i;
2601 3958
2602 mutex_lock(&ioc->sas_discovery_mutex); 3959 switch (hot_plug_info->event_type) {
2603 switch (ev->event_type) {
2604 case MPTSAS_DEL_DEVICE:
2605 3960
2606 phy_info = NULL; 3961 case MPTSAS_ADD_PHYSDISK:
2607 if (ev->phys_disk_num_valid) { 3962
2608 if (ev->hidden_raid_component){ 3963 if (!ioc->raid_data.pIocPg2)
2609 if (mptsas_sas_device_pg0(ioc, &sas_device, 3964 break;
2610 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 3965
2611 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3966 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
2612 (ev->channel << 8) + ev->id)) { 3967 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
2613 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 3968 hot_plug_info->id) {
2614 "%s: exit at line=%d\n", ioc->name, 3969 printk(MYIOC_s_WARN_FMT "firmware bug: unable "
2615 __func__, __LINE__)); 3970 "to add hidden disk - target_id matchs "
2616 break; 3971 "volume_id\n", ioc->name);
2617 } 3972 mptsas_free_fw_event(ioc, fw_event);
2618 phy_info = mptsas_find_phyinfo_by_sas_address( 3973 return;
2619 ioc, sas_device.sas_address); 3974 }
2620 }else
2621 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
2622 ioc, ev->channel, ev->phys_disk_num);
2623 } 3975 }
3976 mpt_findImVolumes(ioc);
2624 3977
3978 case MPTSAS_ADD_DEVICE:
3979 memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
3980 mptsas_sas_device_pg0(ioc, &sas_device,
3981 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
3982 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3983 (hot_plug_info->channel << 8) +
3984 hot_plug_info->id);
3985
3986 if (!sas_device.handle)
3987 return;
3988
3989 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
2625 if (!phy_info) 3990 if (!phy_info)
2626 phy_info = mptsas_find_phyinfo_by_target(ioc, 3991 break;
2627 ev->channel, ev->id);
2628 3992
2629 /* 3993 if (mptsas_get_rphy(phy_info))
2630 * Sanity checks, for non-existing phys and remote rphys. 3994 break;
2631 */ 3995
2632 if (!phy_info){ 3996 mptsas_add_end_device(ioc, phy_info);
3997 break;
3998
3999 case MPTSAS_DEL_DEVICE:
4000 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
4001 hot_plug_info->sas_address);
4002 mptsas_del_end_device(ioc, phy_info);
4003 break;
4004
4005 case MPTSAS_DEL_PHYSDISK:
4006
4007 mpt_findImVolumes(ioc);
4008
4009 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
4010 ioc, hot_plug_info->phys_disk_num,
4011 hot_plug_info->channel,
4012 hot_plug_info->id);
4013 mptsas_del_end_device(ioc, phy_info);
4014 break;
4015
4016 case MPTSAS_ADD_PHYSDISK_REPROBE:
4017
4018 if (mptsas_sas_device_pg0(ioc, &sas_device,
4019 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
4020 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
4021 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2633 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4022 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2634 "%s: exit at line=%d\n", ioc->name, 4023 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2635 __func__, __LINE__)); 4024 __func__, hot_plug_info->id, __LINE__));
2636 break; 4025 break;
2637 } 4026 }
2638 if (!phy_info->port_details) { 4027
4028 phy_info = mptsas_find_phyinfo_by_sas_address(
4029 ioc, sas_device.sas_address);
4030
4031 if (!phy_info) {
2639 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4032 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2640 "%s: exit at line=%d\n", ioc->name, 4033 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2641 __func__, __LINE__)); 4034 __func__, hot_plug_info->id, __LINE__));
2642 break; 4035 break;
2643 } 4036 }
2644 rphy = mptsas_get_rphy(phy_info); 4037
2645 if (!rphy) { 4038 starget = mptsas_get_starget(phy_info);
4039 if (!starget) {
2646 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4040 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2647 "%s: exit at line=%d\n", ioc->name, 4041 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2648 __func__, __LINE__)); 4042 __func__, hot_plug_info->id, __LINE__));
2649 break; 4043 break;
2650 } 4044 }
2651 4045
2652 port = mptsas_get_port(phy_info); 4046 vtarget = starget->hostdata;
2653 if (!port) { 4047 if (!vtarget) {
2654 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4048 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2655 "%s: exit at line=%d\n", ioc->name, 4049 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2656 __func__, __LINE__)); 4050 __func__, hot_plug_info->id, __LINE__));
2657 break; 4051 break;
2658 } 4052 }
2659 4053
2660 starget = mptsas_get_starget(phy_info); 4054 mpt_findImVolumes(ioc);
2661 if (starget) {
2662 vtarget = starget->hostdata;
2663 4055
2664 if (!vtarget) { 4056 starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
2665 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4057 "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2666 "%s: exit at line=%d\n", ioc->name, 4058 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2667 __func__, __LINE__)); 4059 hot_plug_info->phys_disk_num, (unsigned long long)
2668 break; 4060 sas_device.sas_address);
2669 }
2670 4061
2671 /* 4062 vtarget->id = hot_plug_info->phys_disk_num;
2672 * Handling RAID components 4063 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
2673 */ 4064 phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
2674 if (ev->phys_disk_num_valid && 4065 mptsas_reprobe_target(starget, 1);
2675 ev->hidden_raid_component) {
2676 printk(MYIOC_s_INFO_FMT
2677 "RAID Hidding: channel=%d, id=%d, "
2678 "physdsk %d \n", ioc->name, ev->channel,
2679 ev->id, ev->phys_disk_num);
2680 vtarget->id = ev->phys_disk_num;
2681 vtarget->tflags |=
2682 MPT_TARGET_FLAGS_RAID_COMPONENT;
2683 mptsas_reprobe_target(starget, 1);
2684 phy_info->attached.phys_disk_num =
2685 ev->phys_disk_num;
2686 break;
2687 }
2688 }
2689
2690 if (phy_info->attached.device_info &
2691 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2692 ds = "ssp";
2693 if (phy_info->attached.device_info &
2694 MPI_SAS_DEVICE_INFO_STP_TARGET)
2695 ds = "stp";
2696 if (phy_info->attached.device_info &
2697 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2698 ds = "sata";
2699
2700 printk(MYIOC_s_INFO_FMT
2701 "removing %s device, channel %d, id %d, phy %d\n",
2702 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2703 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
2704 "delete port (%d)\n", ioc->name, port->port_identifier);
2705 sas_port_delete(port);
2706 mptsas_port_delete(ioc, phy_info->port_details);
2707 break; 4066 break;
2708 case MPTSAS_ADD_DEVICE:
2709 4067
2710 if (ev->phys_disk_num_valid) 4068 case MPTSAS_DEL_PHYSDISK_REPROBE:
2711 mpt_findImVolumes(ioc);
2712 4069
2713 /*
2714 * Refresh sas device pg0 data
2715 */
2716 if (mptsas_sas_device_pg0(ioc, &sas_device, 4070 if (mptsas_sas_device_pg0(ioc, &sas_device,
2717 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 4071 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2718 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 4072 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2719 (ev->channel << 8) + ev->id)) { 4073 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2720 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4074 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2721 "%s: exit at line=%d\n", ioc->name, 4075 "%s: fw_id=%d exit at line=%d\n",
2722 __func__, __LINE__)); 4076 ioc->name, __func__,
4077 hot_plug_info->id, __LINE__));
2723 break; 4078 break;
2724 } 4079 }
2725 4080
2726 __mptsas_discovery_work(ioc);
2727
2728 phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 4081 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2729 sas_device.sas_address); 4082 sas_device.sas_address);
2730 4083 if (!phy_info) {
2731 if (!phy_info || !phy_info->port_details) {
2732 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4084 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2733 "%s: exit at line=%d\n", ioc->name, 4085 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2734 __func__, __LINE__)); 4086 __func__, hot_plug_info->id, __LINE__));
2735 break; 4087 break;
2736 } 4088 }
2737 4089
2738 starget = mptsas_get_starget(phy_info); 4090 starget = mptsas_get_starget(phy_info);
2739 if (starget && (!ev->hidden_raid_component)){ 4091 if (!starget) {
2740 4092 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2741 vtarget = starget->hostdata; 4093 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2742 4094 __func__, hot_plug_info->id, __LINE__));
2743 if (!vtarget) {
2744 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2745 "%s: exit at line=%d\n", ioc->name,
2746 __func__, __LINE__));
2747 break;
2748 }
2749 /*
2750 * Handling RAID components
2751 */
2752 if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2753 printk(MYIOC_s_INFO_FMT
2754 "RAID Exposing: channel=%d, id=%d, "
2755 "physdsk %d \n", ioc->name, ev->channel,
2756 ev->id, ev->phys_disk_num);
2757 vtarget->tflags &=
2758 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2759 vtarget->id = ev->id;
2760 mptsas_reprobe_target(starget, 0);
2761 phy_info->attached.phys_disk_num = ~0;
2762 }
2763 break; 4095 break;
2764 } 4096 }
2765 4097
2766 if (mptsas_get_rphy(phy_info)) { 4098 vtarget = starget->hostdata;
4099 if (!vtarget) {
2767 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4100 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2768 "%s: exit at line=%d\n", ioc->name, 4101 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2769 __func__, __LINE__)); 4102 __func__, hot_plug_info->id, __LINE__));
2770 if (ev->channel) printk("%d\n", __LINE__);
2771 break; 4103 break;
2772 } 4104 }
2773 4105
2774 port = mptsas_get_port(phy_info); 4106 if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
2775 if (!port) {
2776 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4107 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2777 "%s: exit at line=%d\n", ioc->name, 4108 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2778 __func__, __LINE__)); 4109 __func__, hot_plug_info->id, __LINE__));
2779 break; 4110 break;
2780 } 4111 }
2781 memcpy(&phy_info->attached, &sas_device,
2782 sizeof(struct mptsas_devinfo));
2783
2784 if (phy_info->attached.device_info &
2785 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2786 ds = "ssp";
2787 if (phy_info->attached.device_info &
2788 MPI_SAS_DEVICE_INFO_STP_TARGET)
2789 ds = "stp";
2790 if (phy_info->attached.device_info &
2791 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2792 ds = "sata";
2793
2794 printk(MYIOC_s_INFO_FMT
2795 "attaching %s device, channel %d, id %d, phy %d\n",
2796 ioc->name, ds, ev->channel, ev->id, ev->phy_id);
2797 4112
2798 mptsas_parse_device_info(&identify, &phy_info->attached); 4113 mpt_findImVolumes(ioc);
2799 rphy = sas_end_device_alloc(port);
2800 if (!rphy) {
2801 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2802 "%s: exit at line=%d\n", ioc->name,
2803 __func__, __LINE__));
2804 break; /* non-fatal: an rphy can be added later */
2805 }
2806 4114
2807 rphy->identify = identify; 4115 starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
2808 if (sas_rphy_add(rphy)) { 4116 " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2809 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4117 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2810 "%s: exit at line=%d\n", ioc->name, 4118 hot_plug_info->phys_disk_num, (unsigned long long)
2811 __func__, __LINE__)); 4119 sas_device.sas_address);
2812 sas_rphy_free(rphy); 4120
2813 break; 4121 vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2814 } 4122 vtarget->id = hot_plug_info->id;
2815 mptsas_set_rphy(ioc, phy_info, rphy); 4123 phy_info->attached.phys_disk_num = ~0;
4124 mptsas_reprobe_target(starget, 0);
4125 mptsas_add_device_component_by_fw(ioc,
4126 hot_plug_info->channel, hot_plug_info->id);
2816 break; 4127 break;
4128
2817 case MPTSAS_ADD_RAID: 4129 case MPTSAS_ADD_RAID:
2818 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4130
2819 ev->id, 0);
2820 if (sdev) {
2821 scsi_device_put(sdev);
2822 break;
2823 }
2824 printk(MYIOC_s_INFO_FMT
2825 "attaching raid volume, channel %d, id %d\n",
2826 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2827 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2828 mpt_findImVolumes(ioc); 4131 mpt_findImVolumes(ioc);
4132 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
4133 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4134 hot_plug_info->id);
4135 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
4136 hot_plug_info->id, 0);
2829 break; 4137 break;
4138
2830 case MPTSAS_DEL_RAID: 4139 case MPTSAS_DEL_RAID:
2831 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4140
2832 ev->id, 0);
2833 if (!sdev)
2834 break;
2835 printk(MYIOC_s_INFO_FMT
2836 "removing raid volume, channel %d, id %d\n",
2837 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2838 vdevice = sdev->hostdata;
2839 scsi_remove_device(sdev);
2840 scsi_device_put(sdev);
2841 mpt_findImVolumes(ioc); 4141 mpt_findImVolumes(ioc);
4142 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4143 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4144 hot_plug_info->id);
4145 scsi_remove_device(hot_plug_info->sdev);
4146 scsi_device_put(hot_plug_info->sdev);
2842 break; 4147 break;
4148
2843 case MPTSAS_ADD_INACTIVE_VOLUME: 4149 case MPTSAS_ADD_INACTIVE_VOLUME:
4150
4151 mpt_findImVolumes(ioc);
2844 mptsas_adding_inactive_raid_components(ioc, 4152 mptsas_adding_inactive_raid_components(ioc,
2845 ev->channel, ev->id); 4153 hot_plug_info->channel, hot_plug_info->id);
2846 break; 4154 break;
2847 case MPTSAS_IGNORE_EVENT: 4155
2848 default: 4156 default:
2849 break; 4157 break;
2850 } 4158 }
2851 4159
2852 mutex_unlock(&ioc->sas_discovery_mutex); 4160 mptsas_free_fw_event(ioc, fw_event);
2853 kfree(ev);
2854} 4161}
2855 4162
2856static void 4163static void
2857mptsas_send_sas_event(MPT_ADAPTER *ioc, 4164mptsas_send_sas_event(struct fw_event_work *fw_event)
2858 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
2859{ 4165{
2860 struct mptsas_hotplug_event *ev; 4166 MPT_ADAPTER *ioc;
2861 u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo); 4167 struct mptsas_hotplug_event hot_plug_info;
2862 __le64 sas_address; 4168 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
4169 u32 device_info;
4170 u64 sas_address;
4171
4172 ioc = fw_event->ioc;
4173 sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
4174 fw_event->event_data;
4175 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
2863 4176
2864 if ((device_info & 4177 if ((device_info &
2865 (MPI_SAS_DEVICE_INFO_SSP_TARGET | 4178 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
2866 MPI_SAS_DEVICE_INFO_STP_TARGET | 4179 MPI_SAS_DEVICE_INFO_STP_TARGET |
2867 MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0) 4180 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
4181 mptsas_free_fw_event(ioc, fw_event);
4182 return;
4183 }
4184
4185 if (sas_event_data->ReasonCode ==
4186 MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
4187 mptbase_sas_persist_operation(ioc,
4188 MPI_SAS_OP_CLEAR_NOT_PRESENT);
4189 mptsas_free_fw_event(ioc, fw_event);
2868 return; 4190 return;
4191 }
2869 4192
2870 switch (sas_event_data->ReasonCode) { 4193 switch (sas_event_data->ReasonCode) {
2871 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 4194 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2872
2873 mptsas_target_reset_queue(ioc, sas_event_data);
2874 break;
2875
2876 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 4195 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2877 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4196 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
2878 if (!ev) { 4197 hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
2879 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4198 hot_plug_info.channel = sas_event_data->Bus;
2880 break; 4199 hot_plug_info.id = sas_event_data->TargetID;
2881 } 4200 hot_plug_info.phy_id = sas_event_data->PhyNum;
2882
2883 INIT_WORK(&ev->work, mptsas_hotplug_work);
2884 ev->ioc = ioc;
2885 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
2886 ev->parent_handle =
2887 le16_to_cpu(sas_event_data->ParentDevHandle);
2888 ev->channel = sas_event_data->Bus;
2889 ev->id = sas_event_data->TargetID;
2890 ev->phy_id = sas_event_data->PhyNum;
2891 memcpy(&sas_address, &sas_event_data->SASAddress, 4201 memcpy(&sas_address, &sas_event_data->SASAddress,
2892 sizeof(__le64)); 4202 sizeof(u64));
2893 ev->sas_address = le64_to_cpu(sas_address); 4203 hot_plug_info.sas_address = le64_to_cpu(sas_address);
2894 ev->device_info = device_info; 4204 hot_plug_info.device_info = device_info;
2895
2896 if (sas_event_data->ReasonCode & 4205 if (sas_event_data->ReasonCode &
2897 MPI_EVENT_SAS_DEV_STAT_RC_ADDED) 4206 MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
2898 ev->event_type = MPTSAS_ADD_DEVICE; 4207 hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
2899 else 4208 else
2900 ev->event_type = MPTSAS_DEL_DEVICE; 4209 hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
2901 schedule_work(&ev->work); 4210 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
2902 break; 4211 break;
4212
2903 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 4213 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
2904 /* 4214 mptbase_sas_persist_operation(ioc,
2905 * Persistent table is full. 4215 MPI_SAS_OP_CLEAR_NOT_PRESENT);
2906 */ 4216 mptsas_free_fw_event(ioc, fw_event);
2907 INIT_WORK(&ioc->sas_persist_task,
2908 mptsas_persist_clear_table);
2909 schedule_work(&ioc->sas_persist_task);
2910 break; 4217 break;
2911 /* 4218
2912 * TODO, handle other events
2913 */
2914 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 4219 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
2915 case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 4220 /* TODO */
2916 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 4221 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2917 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 4222 /* TODO */
2918 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
2919 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
2920 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
2921 default: 4223 default:
4224 mptsas_free_fw_event(ioc, fw_event);
2922 break; 4225 break;
2923 } 4226 }
2924} 4227}
4228
2925static void 4229static void
2926mptsas_send_raid_event(MPT_ADAPTER *ioc, 4230mptsas_send_raid_event(struct fw_event_work *fw_event)
2927 EVENT_DATA_RAID *raid_event_data)
2928{ 4231{
2929 struct mptsas_hotplug_event *ev; 4232 MPT_ADAPTER *ioc;
2930 int status = le32_to_cpu(raid_event_data->SettingsStatus); 4233 EVENT_DATA_RAID *raid_event_data;
2931 int state = (status >> 8) & 0xff; 4234 struct mptsas_hotplug_event hot_plug_info;
2932 4235 int status;
2933 if (ioc->bus_type != SAS) 4236 int state;
2934 return; 4237 struct scsi_device *sdev = NULL;
2935 4238 VirtDevice *vdevice = NULL;
2936 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4239 RaidPhysDiskPage0_t phys_disk;
2937 if (!ev) { 4240
2938 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4241 ioc = fw_event->ioc;
2939 return; 4242 raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
4243 status = le32_to_cpu(raid_event_data->SettingsStatus);
4244 state = (status >> 8) & 0xff;
4245
4246 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4247 hot_plug_info.id = raid_event_data->VolumeID;
4248 hot_plug_info.channel = raid_event_data->VolumeBus;
4249 hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
4250
4251 if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
4252 raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
4253 raid_event_data->ReasonCode ==
4254 MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
4255 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
4256 hot_plug_info.id, 0);
4257 hot_plug_info.sdev = sdev;
4258 if (sdev)
4259 vdevice = sdev->hostdata;
2940 } 4260 }
2941 4261
2942 INIT_WORK(&ev->work, mptsas_hotplug_work); 4262 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
2943 ev->ioc = ioc; 4263 "ReasonCode=%02x\n", ioc->name, __func__,
2944 ev->id = raid_event_data->VolumeID; 4264 raid_event_data->ReasonCode));
2945 ev->channel = raid_event_data->VolumeBus;
2946 ev->event_type = MPTSAS_IGNORE_EVENT;
2947 4265
2948 switch (raid_event_data->ReasonCode) { 4266 switch (raid_event_data->ReasonCode) {
2949 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 4267 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
2950 ev->phys_disk_num_valid = 1; 4268 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
2951 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2952 ev->event_type = MPTSAS_ADD_DEVICE;
2953 break; 4269 break;
2954 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: 4270 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
2955 ev->phys_disk_num_valid = 1; 4271 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
2956 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2957 ev->hidden_raid_component = 1;
2958 ev->event_type = MPTSAS_DEL_DEVICE;
2959 break; 4272 break;
2960 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: 4273 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
2961 switch (state) { 4274 switch (state) {
2962 case MPI_PD_STATE_ONLINE: 4275 case MPI_PD_STATE_ONLINE:
2963 case MPI_PD_STATE_NOT_COMPATIBLE: 4276 case MPI_PD_STATE_NOT_COMPATIBLE:
2964 ev->phys_disk_num_valid = 1; 4277 mpt_raid_phys_disk_pg0(ioc,
2965 ev->phys_disk_num = raid_event_data->PhysDiskNum; 4278 raid_event_data->PhysDiskNum, &phys_disk);
2966 ev->hidden_raid_component = 1; 4279 hot_plug_info.id = phys_disk.PhysDiskID;
2967 ev->event_type = MPTSAS_ADD_DEVICE; 4280 hot_plug_info.channel = phys_disk.PhysDiskBus;
4281 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
2968 break; 4282 break;
4283 case MPI_PD_STATE_FAILED:
2969 case MPI_PD_STATE_MISSING: 4284 case MPI_PD_STATE_MISSING:
2970 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: 4285 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
2971 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: 4286 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
2972 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: 4287 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
2973 ev->phys_disk_num_valid = 1; 4288 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
2974 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2975 ev->event_type = MPTSAS_DEL_DEVICE;
2976 break; 4289 break;
2977 default: 4290 default:
2978 break; 4291 break;
2979 } 4292 }
2980 break; 4293 break;
2981 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 4294 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
2982 ev->event_type = MPTSAS_DEL_RAID; 4295 if (!sdev)
4296 break;
4297 vdevice->vtarget->deleted = 1; /* block IO */
4298 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2983 break; 4299 break;
2984 case MPI_EVENT_RAID_RC_VOLUME_CREATED: 4300 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
2985 ev->event_type = MPTSAS_ADD_RAID; 4301 if (sdev) {
4302 scsi_device_put(sdev);
4303 break;
4304 }
4305 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2986 break; 4306 break;
2987 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 4307 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
4308 if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
4309 if (!sdev)
4310 break;
4311 vdevice->vtarget->deleted = 1; /* block IO */
4312 hot_plug_info.event_type = MPTSAS_DEL_RAID;
4313 break;
4314 }
2988 switch (state) { 4315 switch (state) {
2989 case MPI_RAIDVOL0_STATUS_STATE_FAILED: 4316 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
2990 case MPI_RAIDVOL0_STATUS_STATE_MISSING: 4317 case MPI_RAIDVOL0_STATUS_STATE_MISSING:
2991 ev->event_type = MPTSAS_DEL_RAID; 4318 if (!sdev)
4319 break;
4320 vdevice->vtarget->deleted = 1; /* block IO */
4321 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2992 break; 4322 break;
2993 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: 4323 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
2994 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: 4324 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
2995 ev->event_type = MPTSAS_ADD_RAID; 4325 if (sdev) {
4326 scsi_device_put(sdev);
4327 break;
4328 }
4329 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2996 break; 4330 break;
2997 default: 4331 default:
2998 break; 4332 break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
3001 default: 4335 default:
3002 break; 4336 break;
3003 } 4337 }
3004 schedule_work(&ev->work); 4338
4339 if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
4340 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
4341 else
4342 mptsas_free_fw_event(ioc, fw_event);
3005} 4343}
3006 4344
3007static void 4345/**
3008mptsas_send_discovery_event(MPT_ADAPTER *ioc, 4346 * mptsas_issue_tm - send mptsas internal tm request
3009 EVENT_DATA_SAS_DISCOVERY *discovery_data) 4347 * @ioc: Pointer to MPT_ADAPTER structure
4348 * @type: Task Management type
4349 * @channel: channel number for task management
4350 * @id: Logical Target ID for reset (if appropriate)
4351 * @lun: Logical unit for reset (if appropriate)
4352 * @task_context: Context for the task to be aborted
4353 * @timeout: timeout for task management control
4354 *
4355 * return 0 on success and -1 on failure:
4356 *
4357 */
4358static int
4359mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
4360 int task_context, ulong timeout, u8 *issue_reset)
3010{ 4361{
3011 struct mptsas_discovery_event *ev; 4362 MPT_FRAME_HDR *mf;
4363 SCSITaskMgmt_t *pScsiTm;
4364 int retval;
4365 unsigned long timeleft;
4366
4367 *issue_reset = 0;
4368 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
4369 if (mf == NULL) {
4370 retval = -1; /* return failure */
4371 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
4372 "msg frames!!\n", ioc->name));
4373 goto out;
4374 }
3012 4375
3013 /* 4376 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
3014 * DiscoveryStatus 4377 "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
3015 * 4378 "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
3016 * This flag will be non-zero when firmware 4379 type, timeout, channel, id, (unsigned long long)lun,
3017 * kicks off discovery, and return to zero 4380 task_context));
3018 * once its completed. 4381
3019 */ 4382 pScsiTm = (SCSITaskMgmt_t *) mf;
3020 if (discovery_data->DiscoveryStatus) 4383 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
3021 return; 4384 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4385 pScsiTm->TaskType = type;
4386 pScsiTm->MsgFlags = 0;
4387 pScsiTm->TargetID = id;
4388 pScsiTm->Bus = channel;
4389 pScsiTm->ChainOffset = 0;
4390 pScsiTm->Reserved = 0;
4391 pScsiTm->Reserved1 = 0;
4392 pScsiTm->TaskMsgContext = task_context;
4393 int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
4394
4395 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4396 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
4397 retval = 0;
4398 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
4399
4400 /* Now wait for the command to complete */
4401 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
4402 timeout*HZ);
4403 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
4404 retval = -1; /* return failure */
4405 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
4406 "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
4407 mpt_free_msg_frame(ioc, mf);
4408 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
4409 goto out;
4410 *issue_reset = 1;
4411 goto out;
4412 }
3022 4413
3023 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4414 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
3024 if (!ev) 4415 retval = -1; /* return failure */
4416 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4417 "TaskMgmt request: failed with no reply\n", ioc->name));
4418 goto out;
4419 }
4420
4421 out:
4422 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4423 return retval;
4424}
4425
4426/**
4427 * mptsas_broadcast_primative_work - Handle broadcast primitives
4428 * @work: work queue payload containing info describing the event
4429 *
4430 * this will be handled in workqueue context.
4431 */
4432static void
4433mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
4434{
4435 MPT_ADAPTER *ioc = fw_event->ioc;
4436 MPT_FRAME_HDR *mf;
4437 VirtDevice *vdevice;
4438 int ii;
4439 struct scsi_cmnd *sc;
4440 SCSITaskMgmtReply_t *pScsiTmReply;
4441 u8 issue_reset;
4442 int task_context;
4443 u8 channel, id;
4444 int lun;
4445 u32 termination_count;
4446 u32 query_count;
4447
4448 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4449 "%s - enter\n", ioc->name, __func__));
4450
4451 mutex_lock(&ioc->taskmgmt_cmds.mutex);
4452 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
4453 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4454 mptsas_requeue_fw_event(ioc, fw_event, 1000);
3025 return; 4455 return;
3026 INIT_WORK(&ev->work, mptsas_discovery_work); 4456 }
3027 ev->ioc = ioc; 4457
3028 schedule_work(&ev->work); 4458 issue_reset = 0;
3029}; 4459 termination_count = 0;
4460 query_count = 0;
4461 mpt_findImVolumes(ioc);
4462 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
4463
4464 for (ii = 0; ii < ioc->req_depth; ii++) {
4465 if (ioc->fw_events_off)
4466 goto out;
4467 sc = mptscsih_get_scsi_lookup(ioc, ii);
4468 if (!sc)
4469 continue;
4470 mf = MPT_INDEX_2_MFPTR(ioc, ii);
4471 if (!mf)
4472 continue;
4473 task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
4474 vdevice = sc->device->hostdata;
4475 if (!vdevice || !vdevice->vtarget)
4476 continue;
4477 if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
4478 continue; /* skip hidden raid components */
4479 if (vdevice->vtarget->raidVolume)
4480 continue; /* skip hidden raid components */
4481 channel = vdevice->vtarget->channel;
4482 id = vdevice->vtarget->id;
4483 lun = vdevice->lun;
4484 if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
4485 channel, id, (u64)lun, task_context, 30, &issue_reset))
4486 goto out;
4487 query_count++;
4488 termination_count +=
4489 le32_to_cpu(pScsiTmReply->TerminationCount);
4490 if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
4491 (pScsiTmReply->ResponseCode ==
4492 MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
4493 pScsiTmReply->ResponseCode ==
4494 MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
4495 continue;
4496 if (mptsas_issue_tm(ioc,
4497 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
4498 channel, id, (u64)lun, 0, 30, &issue_reset))
4499 goto out;
4500 termination_count +=
4501 le32_to_cpu(pScsiTmReply->TerminationCount);
4502 }
4503
4504 out:
4505 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4506 "%s - exit, query_count = %d termination_count = %d\n",
4507 ioc->name, __func__, query_count, termination_count));
4508
4509 ioc->broadcast_aen_busy = 0;
4510 mpt_clear_taskmgmt_in_progress_flag(ioc);
4511 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4512
4513 if (issue_reset) {
4514 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
4515 ioc->name, __func__);
4516 mpt_HardResetHandler(ioc, CAN_SLEEP);
4517 }
4518 mptsas_free_fw_event(ioc, fw_event);
4519}
3030 4520
3031/* 4521/*
3032 * mptsas_send_ir2_event - handle exposing hidden disk when 4522 * mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
3037 * 4527 *
3038 */ 4528 */
3039static void 4529static void
3040mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data) 4530mptsas_send_ir2_event(struct fw_event_work *fw_event)
3041{ 4531{
3042 struct mptsas_hotplug_event *ev; 4532 MPT_ADAPTER *ioc;
3043 4533 struct mptsas_hotplug_event hot_plug_info;
3044 if (ir2_data->ReasonCode != 4534 MPI_EVENT_DATA_IR2 *ir2_data;
3045 MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED) 4535 u8 reasonCode;
3046 return; 4536 RaidPhysDiskPage0_t phys_disk;
3047 4537
3048 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4538 ioc = fw_event->ioc;
3049 if (!ev) 4539 ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
4540 reasonCode = ir2_data->ReasonCode;
4541
4542 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
4543 "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
4544
4545 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4546 hot_plug_info.id = ir2_data->TargetID;
4547 hot_plug_info.channel = ir2_data->Bus;
4548 switch (reasonCode) {
4549 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
4550 hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
4551 break;
4552 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
4553 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4554 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
4555 break;
4556 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
4557 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4558 mpt_raid_phys_disk_pg0(ioc,
4559 ir2_data->PhysDiskNum, &phys_disk);
4560 hot_plug_info.id = phys_disk.PhysDiskID;
4561 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
4562 break;
4563 default:
4564 mptsas_free_fw_event(ioc, fw_event);
3050 return; 4565 return;
3051 4566 }
3052 INIT_WORK(&ev->work, mptsas_hotplug_work); 4567 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
3053 ev->ioc = ioc; 4568}
3054 ev->id = ir2_data->TargetID;
3055 ev->channel = ir2_data->Bus;
3056 ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
3057
3058 schedule_work(&ev->work);
3059};
3060 4569
3061static int 4570static int
3062mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) 4571mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
3063{ 4572{
3064 int rc=1; 4573 u32 event = le32_to_cpu(reply->Event);
3065 u8 event = le32_to_cpu(reply->Event) & 0xFF; 4574 int sz, event_data_sz;
4575 struct fw_event_work *fw_event;
4576 unsigned long delay;
3066 4577
3067 if (!ioc->sh) 4578 /* events turned off due to host reset or driver unloading */
3068 goto out; 4579 if (ioc->fw_events_off)
3069 4580 return 0;
3070 /*
3071 * sas_discovery_ignore_events
3072 *
3073 * This flag is to prevent anymore processing of
3074 * sas events once mptsas_remove function is called.
3075 */
3076 if (ioc->sas_discovery_ignore_events) {
3077 rc = mptscsih_event_process(ioc, reply);
3078 goto out;
3079 }
3080 4581
4582 delay = msecs_to_jiffies(1);
3081 switch (event) { 4583 switch (event) {
4584 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
4585 {
4586 EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
4587 (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
4588 if (broadcast_event_data->Primitive !=
4589 MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
4590 return 0;
4591 if (ioc->broadcast_aen_busy)
4592 return 0;
4593 ioc->broadcast_aen_busy = 1;
4594 break;
4595 }
3082 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 4596 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
3083 mptsas_send_sas_event(ioc, 4597 {
3084 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); 4598 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
4599 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
4600
4601 if (sas_event_data->ReasonCode ==
4602 MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
4603 mptsas_target_reset_queue(ioc, sas_event_data);
4604 return 0;
4605 }
3085 break; 4606 break;
3086 case MPI_EVENT_INTEGRATED_RAID: 4607 }
3087 mptsas_send_raid_event(ioc, 4608 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
3088 (EVENT_DATA_RAID *)reply->Data); 4609 {
4610 MpiEventDataSasExpanderStatusChange_t *expander_data =
4611 (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
4612
4613 if (ioc->old_sas_discovery_protocal)
4614 return 0;
4615
4616 if (expander_data->ReasonCode ==
4617 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
4618 ioc->device_missing_delay)
4619 delay = HZ * ioc->device_missing_delay;
3089 break; 4620 break;
4621 }
4622 case MPI_EVENT_SAS_DISCOVERY:
4623 {
4624 u32 discovery_status;
4625 EventDataSasDiscovery_t *discovery_data =
4626 (EventDataSasDiscovery_t *)reply->Data;
4627
4628 discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
4629 ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
4630 if (ioc->old_sas_discovery_protocal && !discovery_status)
4631 mptsas_queue_rescan(ioc);
4632 return 0;
4633 }
4634 case MPI_EVENT_INTEGRATED_RAID:
3090 case MPI_EVENT_PERSISTENT_TABLE_FULL: 4635 case MPI_EVENT_PERSISTENT_TABLE_FULL:
3091 INIT_WORK(&ioc->sas_persist_task,
3092 mptsas_persist_clear_table);
3093 schedule_work(&ioc->sas_persist_task);
3094 break;
3095 case MPI_EVENT_SAS_DISCOVERY:
3096 mptsas_send_discovery_event(ioc,
3097 (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
3098 break;
3099 case MPI_EVENT_IR2: 4636 case MPI_EVENT_IR2:
3100 mptsas_send_ir2_event(ioc, 4637 case MPI_EVENT_SAS_PHY_LINK_STATUS:
3101 (PTR_MPI_EVENT_DATA_IR2)reply->Data); 4638 case MPI_EVENT_QUEUE_FULL:
3102 break; 4639 break;
3103 default: 4640 default:
3104 rc = mptscsih_event_process(ioc, reply); 4641 return 0;
3105 break;
3106 } 4642 }
3107 out:
3108 4643
3109 return rc; 4644 event_data_sz = ((reply->MsgLength * 4) -
4645 offsetof(EventNotificationReply_t, Data));
4646 sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
4647 fw_event = kzalloc(sz, GFP_ATOMIC);
4648 if (!fw_event) {
4649 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
4650 __func__, __LINE__);
4651 return 0;
4652 }
4653 memcpy(fw_event->event_data, reply->Data, event_data_sz);
4654 fw_event->event = event;
4655 fw_event->ioc = ioc;
4656 mptsas_add_fw_event(ioc, fw_event, delay);
4657 return 0;
4658}
4659
4660/* Delete a volume when no longer listed in ioc pg2
4661 */
4662static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
4663{
4664 struct scsi_device *sdev;
4665 int i;
4666
4667 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
4668 if (!sdev)
4669 return;
4670 if (!ioc->raid_data.pIocPg2)
4671 goto out;
4672 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
4673 goto out;
4674 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
4675 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
4676 goto release_sdev;
4677 out:
4678 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4679 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
4680 scsi_remove_device(sdev);
4681 release_sdev:
4682 scsi_device_put(sdev);
3110} 4683}
3111 4684
3112static int 4685static int
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3128 return r; 4701 return r;
3129 4702
3130 ioc = pci_get_drvdata(pdev); 4703 ioc = pci_get_drvdata(pdev);
4704 mptsas_fw_event_off(ioc);
3131 ioc->DoneCtx = mptsasDoneCtx; 4705 ioc->DoneCtx = mptsasDoneCtx;
3132 ioc->TaskCtx = mptsasTaskCtx; 4706 ioc->TaskCtx = mptsasTaskCtx;
3133 ioc->InternalCtx = mptsasInternalCtx; 4707 ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3211 * A slightly different algorithm is required for 4785 * A slightly different algorithm is required for
3212 * 64bit SGEs. 4786 * 64bit SGEs.
3213 */ 4787 */
3214 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4788 scale = ioc->req_sz/ioc->SGE_size;
3215 if (sizeof(dma_addr_t) == sizeof(u64)) { 4789 if (ioc->sg_addr_size == sizeof(u64)) {
3216 numSGE = (scale - 1) * 4790 numSGE = (scale - 1) *
3217 (ioc->facts.MaxChainDepth-1) + scale + 4791 (ioc->facts.MaxChainDepth-1) + scale +
3218 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 4792 (ioc->req_sz - 60) / ioc->SGE_size;
3219 sizeof(u32));
3220 } else { 4793 } else {
3221 numSGE = 1 + (scale - 1) * 4794 numSGE = 1 + (scale - 1) *
3222 (ioc->facts.MaxChainDepth-1) + scale + 4795 (ioc->facts.MaxChainDepth-1) + scale +
3223 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 4796 (ioc->req_sz - 64) / ioc->SGE_size;
3224 sizeof(u32));
3225 } 4797 }
3226 4798
3227 if (numSGE < sh->sg_tablesize) { 4799 if (numSGE < sh->sg_tablesize) {
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3251 4823
3252 /* Clear the TM flags 4824 /* Clear the TM flags
3253 */ 4825 */
3254 hd->tmPending = 0;
3255 hd->tmState = TM_STATE_NONE;
3256 hd->resetPending = 0;
3257 hd->abortSCpnt = NULL; 4826 hd->abortSCpnt = NULL;
3258 4827
3259 /* Clear the pointer used to store 4828 /* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3273 4842
3274 ioc->sas_data.ptClear = mpt_pt_clear; 4843 ioc->sas_data.ptClear = mpt_pt_clear;
3275 4844
3276 init_waitqueue_head(&hd->scandv_waitq);
3277 hd->scandv_wait_done = 0;
3278 hd->last_queue_full = 0; 4845 hd->last_queue_full = 0;
3279 INIT_LIST_HEAD(&hd->target_reset_list); 4846 INIT_LIST_HEAD(&hd->target_reset_list);
4847 INIT_LIST_HEAD(&ioc->sas_device_info_list);
4848 mutex_init(&ioc->sas_device_info_mutex);
4849
3280 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 4850 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
3281 4851
3282 if (ioc->sas_data.ptClear==1) { 4852 if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3291 goto out_mptsas_probe; 4861 goto out_mptsas_probe;
3292 } 4862 }
3293 4863
4864 /* older firmware doesn't support expander events */
4865 if ((ioc->facts.HeaderVersion >> 8) < 0xE)
4866 ioc->old_sas_discovery_protocal = 1;
3294 mptsas_scan_sas_topology(ioc); 4867 mptsas_scan_sas_topology(ioc);
3295 4868 mptsas_fw_event_on(ioc);
3296 return 0; 4869 return 0;
3297 4870
3298 out_mptsas_probe: 4871 out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3301 return error; 4874 return error;
3302} 4875}
3303 4876
4877void
4878mptsas_shutdown(struct pci_dev *pdev)
4879{
4880 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
4881
4882 mptsas_fw_event_off(ioc);
4883 mptsas_cleanup_fw_event_q(ioc);
4884}
4885
3304static void __devexit mptsas_remove(struct pci_dev *pdev) 4886static void __devexit mptsas_remove(struct pci_dev *pdev)
3305{ 4887{
3306 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 4888 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
3307 struct mptsas_portinfo *p, *n; 4889 struct mptsas_portinfo *p, *n;
3308 int i; 4890 int i;
3309 4891
4892 mptsas_shutdown(pdev);
4893
4894 mptsas_del_device_components(ioc);
4895
3310 ioc->sas_discovery_ignore_events = 1; 4896 ioc->sas_discovery_ignore_events = 1;
3311 sas_remove_host(ioc->sh); 4897 sas_remove_host(ioc->sh);
3312 4898
@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
3315 list_del(&p->list); 4901 list_del(&p->list);
3316 for (i = 0 ; i < p->num_phys ; i++) 4902 for (i = 0 ; i < p->num_phys ; i++)
3317 mptsas_port_delete(ioc, p->phy_info[i].port_details); 4903 mptsas_port_delete(ioc, p->phy_info[i].port_details);
4904
3318 kfree(p->phy_info); 4905 kfree(p->phy_info);
3319 kfree(p); 4906 kfree(p);
3320 } 4907 }
3321 mutex_unlock(&ioc->sas_topology_mutex); 4908 mutex_unlock(&ioc->sas_topology_mutex);
3322 4909 ioc->hba_port_info = NULL;
3323 mptscsih_remove(pdev); 4910 mptscsih_remove(pdev);
3324} 4911}
3325 4912
@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
3344 .id_table = mptsas_pci_table, 4931 .id_table = mptsas_pci_table,
3345 .probe = mptsas_probe, 4932 .probe = mptsas_probe,
3346 .remove = __devexit_p(mptsas_remove), 4933 .remove = __devexit_p(mptsas_remove),
3347 .shutdown = mptscsih_shutdown, 4934 .shutdown = mptsas_shutdown,
3348#ifdef CONFIG_PM 4935#ifdef CONFIG_PM
3349 .suspend = mptscsih_suspend, 4936 .suspend = mptscsih_suspend,
3350 .resume = mptscsih_resume, 4937 .resume = mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
3364 return -ENODEV; 4951 return -ENODEV;
3365 4952
3366 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); 4953 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
3367 mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); 4954 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
3368 mptsasInternalCtx = 4955 mptsasInternalCtx =
3369 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); 4956 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
3370 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); 4957 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
4958 mptsasDeviceResetCtx =
4959 mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
3371 4960
3372 mpt_event_register(mptsasDoneCtx, mptsas_event_process); 4961 mpt_event_register(mptsasDoneCtx, mptsas_event_process);
3373 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); 4962 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
3392 mpt_deregister(mptsasInternalCtx); 4981 mpt_deregister(mptsasInternalCtx);
3393 mpt_deregister(mptsasTaskCtx); 4982 mpt_deregister(mptsasTaskCtx);
3394 mpt_deregister(mptsasDoneCtx); 4983 mpt_deregister(mptsasDoneCtx);
4984 mpt_deregister(mptsasDeviceResetCtx);
3395} 4985}
3396 4986
3397module_init(mptsas_init); 4987module_init(mptsas_init);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 2b544e0877e6..953c2bfcf6aa 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
53 struct list_head list; 53 struct list_head list;
54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; 54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
55 u8 target_reset_issued; 55 u8 target_reset_issued;
56 unsigned long time_count;
56}; 57};
57 58
58enum mptsas_hotplug_action { 59enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
60 MPTSAS_DEL_DEVICE, 61 MPTSAS_DEL_DEVICE,
61 MPTSAS_ADD_RAID, 62 MPTSAS_ADD_RAID,
62 MPTSAS_DEL_RAID, 63 MPTSAS_DEL_RAID,
64 MPTSAS_ADD_PHYSDISK,
65 MPTSAS_ADD_PHYSDISK_REPROBE,
66 MPTSAS_DEL_PHYSDISK,
67 MPTSAS_DEL_PHYSDISK_REPROBE,
63 MPTSAS_ADD_INACTIVE_VOLUME, 68 MPTSAS_ADD_INACTIVE_VOLUME,
64 MPTSAS_IGNORE_EVENT, 69 MPTSAS_IGNORE_EVENT,
65}; 70};
66 71
72struct mptsas_mapping{
73 u8 id;
74 u8 channel;
75};
76
77struct mptsas_device_info {
78 struct list_head list;
79 struct mptsas_mapping os; /* operating system mapping*/
80 struct mptsas_mapping fw; /* firmware mapping */
81 u64 sas_address;
82 u32 device_info; /* specific bits for devices */
83 u16 slot; /* enclosure slot id */
84 u64 enclosure_logical_id; /*enclosure address */
85 u8 is_logical_volume; /* is this logical volume */
86 /* this belongs to volume */
87 u8 is_hidden_raid_component;
88 /* this valid when is_hidden_raid_component set */
89 u8 volume_id;
90 /* cached data for a removed device */
91 u8 is_cached;
92};
93
67struct mptsas_hotplug_event { 94struct mptsas_hotplug_event {
68 struct work_struct work;
69 MPT_ADAPTER *ioc; 95 MPT_ADAPTER *ioc;
70 enum mptsas_hotplug_action event_type; 96 enum mptsas_hotplug_action event_type;
71 u64 sas_address; 97 u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
73 u8 id; 99 u8 id;
74 u32 device_info; 100 u32 device_info;
75 u16 handle; 101 u16 handle;
76 u16 parent_handle;
77 u8 phy_id; 102 u8 phy_id;
78 u8 phys_disk_num_valid; /* hrc (hidden raid component) */
79 u8 phys_disk_num; /* hrc - unique index*/ 103 u8 phys_disk_num; /* hrc - unique index*/
80 u8 hidden_raid_component; /* hrc - don't expose*/ 104 struct scsi_device *sdev;
105};
106
107struct fw_event_work {
108 struct list_head list;
109 struct delayed_work work;
110 MPT_ADAPTER *ioc;
111 u32 event;
112 u8 retries;
113 u8 event_data[1];
81}; 114};
82 115
83struct mptsas_discovery_event { 116struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e62c6bc4ad33..024e8305bcf2 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
80/* 80/*
81 * Other private/forward protos... 81 * Other private/forward protos...
82 */ 82 */
83static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); 83struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); 84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); 85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); 86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
92 SCSIIORequest_t *pReq, int req_idx); 92 SCSIIORequest_t *pReq, int req_idx);
93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); 93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
95static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
96static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
97 95
98static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); 96int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
97 int lun, int ctx2abort, ulong timeout);
99 98
100int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 99int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
101int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 100int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
102 101
102void
103mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
104static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
105 MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
103int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); 106int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
104static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); 107static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
105static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); 108static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
106 109
110static int
111mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
112 SCSITaskMgmtReply_t *pScsiTmReply);
107void mptscsih_remove(struct pci_dev *); 113void mptscsih_remove(struct pci_dev *);
108void mptscsih_shutdown(struct pci_dev *); 114void mptscsih_shutdown(struct pci_dev *);
109#ifdef CONFIG_PM 115#ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int mptscsih_resume(struct pci_dev *pdev);
113 119
114#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE 120#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
115 121
116/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
117/**
118 * mptscsih_add_sge - Place a simple SGE at address pAddr.
119 * @pAddr: virtual address for SGE
120 * @flagslength: SGE flags and data transfer length
121 * @dma_addr: Physical address
122 *
123 * This routine places a MPT request frame back on the MPT adapter's
124 * FreeQ.
125 */
126static inline void
127mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
128{
129 if (sizeof(dma_addr_t) == sizeof(u64)) {
130 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
131 u32 tmp = dma_addr & 0xFFFFFFFF;
132
133 pSge->FlagsLength = cpu_to_le32(flagslength);
134 pSge->Address.Low = cpu_to_le32(tmp);
135 tmp = (u32) ((u64)dma_addr >> 32);
136 pSge->Address.High = cpu_to_le32(tmp);
137
138 } else {
139 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
140 pSge->FlagsLength = cpu_to_le32(flagslength);
141 pSge->Address = cpu_to_le32(dma_addr);
142 }
143} /* mptscsih_add_sge() */
144
145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
146/**
147 * mptscsih_add_chain - Place a chain SGE at address pAddr.
148 * @pAddr: virtual address for SGE
149 * @next: nextChainOffset value (u32's)
150 * @length: length of next SGL segment
151 * @dma_addr: Physical address
152 *
153 * This routine places a MPT request frame back on the MPT adapter's
154 * FreeQ.
155 */
156static inline void
157mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
158{
159 if (sizeof(dma_addr_t) == sizeof(u64)) {
160 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
161 u32 tmp = dma_addr & 0xFFFFFFFF;
162
163 pChain->Length = cpu_to_le16(length);
164 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
165
166 pChain->NextChainOffset = next;
167
168 pChain->Address.Low = cpu_to_le32(tmp);
169 tmp = (u32) ((u64)dma_addr >> 32);
170 pChain->Address.High = cpu_to_le32(tmp);
171 } else {
172 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
173 pChain->Length = cpu_to_le16(length);
174 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
175 pChain->NextChainOffset = next;
176 pChain->Address = cpu_to_le32(dma_addr);
177 }
178} /* mptscsih_add_chain() */
179 122
180/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 123/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
181/* 124/*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
281 */ 224 */
282 225
283nextSGEset: 226nextSGEset:
284 numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) ); 227 numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
285 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; 228 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
286 229
287 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; 230 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
288 231
289 /* Get first (num - 1) SG elements 232 /* Get first (num - 1) SG elements
290 * Skip any SG entries with a length of 0 233 * Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
293 for (ii=0; ii < (numSgeThisFrame-1); ii++) { 236 for (ii=0; ii < (numSgeThisFrame-1); ii++) {
294 thisxfer = sg_dma_len(sg); 237 thisxfer = sg_dma_len(sg);
295 if (thisxfer == 0) { 238 if (thisxfer == 0) {
296 sg = sg_next(sg); /* Get next SG element from the OS */ 239 /* Get next SG element from the OS */
240 sg = sg_next(sg);
297 sg_done++; 241 sg_done++;
298 continue; 242 continue;
299 } 243 }
300 244
301 v2 = sg_dma_address(sg); 245 v2 = sg_dma_address(sg);
302 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 246 ioc->add_sge(psge, sgflags | thisxfer, v2);
303 247
304 sg = sg_next(sg); /* Get next SG element from the OS */ 248 /* Get next SG element from the OS */
305 psge += (sizeof(u32) + sizeof(dma_addr_t)); 249 sg = sg_next(sg);
306 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 250 psge += ioc->SGE_size;
251 sgeOffset += ioc->SGE_size;
307 sg_done++; 252 sg_done++;
308 } 253 }
309 254
@@ -320,12 +265,8 @@ nextSGEset:
320 thisxfer = sg_dma_len(sg); 265 thisxfer = sg_dma_len(sg);
321 266
322 v2 = sg_dma_address(sg); 267 v2 = sg_dma_address(sg);
323 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 268 ioc->add_sge(psge, sgflags | thisxfer, v2);
324 /* 269 sgeOffset += ioc->SGE_size;
325 sg = sg_next(sg);
326 psge += (sizeof(u32) + sizeof(dma_addr_t));
327 */
328 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
329 sg_done++; 270 sg_done++;
330 271
331 if (chainSge) { 272 if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
334 * Update the chain element 275 * Update the chain element
335 * Offset and Length fields. 276 * Offset and Length fields.
336 */ 277 */
337 mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 278 ioc->add_chain((char *)chainSge, 0, sgeOffset,
279 ioc->ChainBufferDMA + chain_dma_off);
338 } else { 280 } else {
339 /* The current buffer is the original MF 281 /* The current buffer is the original MF
340 * and there is no Chain buffer. 282 * and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
367 * set properly). 309 * set properly).
368 */ 310 */
369 if (sg_done) { 311 if (sg_done) {
370 u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t))); 312 u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
371 sgflags = le32_to_cpu(*ptmp); 313 sgflags = le32_to_cpu(*ptmp);
372 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; 314 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
373 *ptmp = cpu_to_le32(sgflags); 315 *ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
381 * Old chain element is now complete. 323 * Old chain element is now complete.
382 */ 324 */
383 u8 nextChain = (u8) (sgeOffset >> 2); 325 u8 nextChain = (u8) (sgeOffset >> 2);
384 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 326 sgeOffset += ioc->SGE_size;
385 mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 327 ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
328 ioc->ChainBufferDMA + chain_dma_off);
386 } else { 329 } else {
387 /* The original MF buffer requires a chain buffer - 330 /* The original MF buffer requires a chain buffer -
388 * set the offset. 331 * set the offset.
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
592 } 535 }
593 536
594 scsi_print_command(sc); 537 scsi_print_command(sc);
595 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n", 538 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
596 ioc->name, pScsiReply->Bus, pScsiReply->TargetID); 539 ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
597 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, " 540 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
598 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow, 541 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
599 scsi_get_resid(sc)); 542 scsi_get_resid(sc));
600 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, " 543 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
601 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag), 544 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
602 le32_to_cpu(pScsiReply->TransferCount), sc->result); 545 le32_to_cpu(pScsiReply->TransferCount), sc->result);
546
603 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), " 547 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
604 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n", 548 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
605 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus, 549 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
654 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); 598 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
655 req_idx_MR = (mr != NULL) ? 599 req_idx_MR = (mr != NULL) ?
656 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; 600 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
601
602 /* Special case, where already freed message frame is received from
603 * Firmware. It happens with Resetting IOC.
604 * Return immediately. Do not care
605 */
657 if ((req_idx != req_idx_MR) || 606 if ((req_idx != req_idx_MR) ||
658 (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) { 607 (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
659 printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
660 ioc->name);
661 printk (MYIOC_s_ERR_FMT
662 "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
663 ioc->name, req_idx, req_idx_MR, mf, mr,
664 mptscsih_get_scsi_lookup(ioc, req_idx_MR));
665 return 0; 608 return 0;
666 }
667 609
668 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); 610 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
669 if (sc == NULL) { 611 if (sc == NULL) {
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
810 */ 752 */
811 753
812 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ 754 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
813 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
814 /* Linux handles an unsolicited DID_RESET better 755 /* Linux handles an unsolicited DID_RESET better
815 * than an unsolicited DID_ABORT. 756 * than an unsolicited DID_ABORT.
816 */ 757 */
817 sc->result = DID_RESET << 16; 758 sc->result = DID_RESET << 16;
818 759
760 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
761 if (ioc->bus_type == FC)
762 sc->result = DID_ERROR << 16;
763 else
764 sc->result = DID_RESET << 16;
819 break; 765 break;
820 766
821 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 767 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
992 scsi_dma_unmap(sc); 938 scsi_dma_unmap(sc);
993 sc->result = DID_RESET << 16; 939 sc->result = DID_RESET << 16;
994 sc->host_scribble = NULL; 940 sc->host_scribble = NULL;
995 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT 941 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
996 "completing cmds: fw_channel %d, fw_id %d, sc=%p," 942 "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
997 " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii); 943 "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
998 sc->scsi_done(sc); 944 sc->scsi_done(sc);
999 } 945 }
1000} 946}
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
1053 scsi_dma_unmap(sc); 999 scsi_dma_unmap(sc);
1054 sc->host_scribble = NULL; 1000 sc->host_scribble = NULL;
1055 sc->result = DID_NO_CONNECT << 16; 1001 sc->result = DID_NO_CONNECT << 16;
1056 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d," 1002 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
1057 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, 1003 MYIOC_s_FMT "completing cmds: fw_channel %d, "
1058 vdevice->vtarget->id, sc, mf, ii); 1004 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
1005 vdevice->vtarget->channel, vdevice->vtarget->id,
1006 sc, mf, ii));
1059 sc->scsi_done(sc); 1007 sc->scsi_done(sc);
1060 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1008 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1061 } 1009 }
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1346 MPT_FRAME_HDR *mf; 1294 MPT_FRAME_HDR *mf;
1347 SCSIIORequest_t *pScsiReq; 1295 SCSIIORequest_t *pScsiReq;
1348 VirtDevice *vdevice = SCpnt->device->hostdata; 1296 VirtDevice *vdevice = SCpnt->device->hostdata;
1349 int lun;
1350 u32 datalen; 1297 u32 datalen;
1351 u32 scsictl; 1298 u32 scsictl;
1352 u32 scsidir; 1299 u32 scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1357 1304
1358 hd = shost_priv(SCpnt->device->host); 1305 hd = shost_priv(SCpnt->device->host);
1359 ioc = hd->ioc; 1306 ioc = hd->ioc;
1360 lun = SCpnt->device->lun;
1361 SCpnt->scsi_done = done; 1307 SCpnt->scsi_done = done;
1362 1308
1363 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", 1309 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
1364 ioc->name, SCpnt, done)); 1310 ioc->name, SCpnt, done));
1365 1311
1366 if (hd->resetPending) { 1312 if (ioc->taskmgmt_quiesce_io) {
1367 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", 1313 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
1368 ioc->name, SCpnt)); 1314 ioc->name, SCpnt));
1369 return SCSI_MLQUEUE_HOST_BUSY; 1315 return SCSI_MLQUEUE_HOST_BUSY;
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1422 pScsiReq->CDBLength = SCpnt->cmd_len; 1368 pScsiReq->CDBLength = SCpnt->cmd_len;
1423 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; 1369 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1424 pScsiReq->Reserved = 0; 1370 pScsiReq->Reserved = 0;
1425 pScsiReq->MsgFlags = mpt_msg_flags(); 1371 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
1426 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN); 1372 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
1427 pScsiReq->Control = cpu_to_le32(scsictl); 1373 pScsiReq->Control = cpu_to_le32(scsictl);
1428 1374
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1448 */ 1394 */
1449 if (datalen == 0) { 1395 if (datalen == 0) {
1450 /* Add a NULL SGE */ 1396 /* Add a NULL SGE */
1451 mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0, 1397 ioc->add_sge((char *)&pScsiReq->SGL,
1398 MPT_SGE_FLAGS_SSIMPLE_READ | 0,
1452 (dma_addr_t) -1); 1399 (dma_addr_t) -1);
1453 } else { 1400 } else {
1454 /* Add a 32 or 64 bit SGE */ 1401 /* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1528 1475
1529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1476/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1530/** 1477/**
1531 * mptscsih_TMHandler - Generic handler for SCSI Task Management. 1478 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1532 * @hd: Pointer to MPT SCSI HOST structure 1479 * @hd: Pointer to MPT_SCSI_HOST structure
1533 * @type: Task Management type 1480 * @type: Task Management type
1534 * @channel: channel number for task management 1481 * @channel: channel number for task management
1535 * @id: Logical Target ID for reset (if appropriate) 1482 * @id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1537 * @ctx2abort: Context for the task to be aborted (if appropriate) 1484 * @ctx2abort: Context for the task to be aborted (if appropriate)
1538 * @timeout: timeout for task management control 1485 * @timeout: timeout for task management control
1539 * 1486 *
1540 * Fall through to mpt_HardResetHandler if: not operational, too many 1487 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1541 * failed TM requests or handshake failure. 1488 * or a non-interrupt thread. In the former, must not call schedule().
1542 * 1489 *
1543 * Remark: Currently invoked from a non-interrupt thread (_bh). 1490 * Not all fields are meaningfull for all task types.
1544 * 1491 *
1545 * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC 1492 * Returns 0 for SUCCESS, or FAILED.
1546 * will be active.
1547 * 1493 *
1548 * Returns 0 for SUCCESS, or %FAILED.
1549 **/ 1494 **/
1550int 1495int
1551mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) 1496mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1497 int ctx2abort, ulong timeout)
1552{ 1498{
1553 MPT_ADAPTER *ioc; 1499 MPT_FRAME_HDR *mf;
1554 int rc = -1; 1500 SCSITaskMgmt_t *pScsiTm;
1501 int ii;
1502 int retval;
1503 MPT_ADAPTER *ioc = hd->ioc;
1504 unsigned long timeleft;
1505 u8 issue_hard_reset;
1555 u32 ioc_raw_state; 1506 u32 ioc_raw_state;
1556 unsigned long flags; 1507 unsigned long time_count;
1557
1558 ioc = hd->ioc;
1559 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
1560
1561 // SJR - CHECKME - Can we avoid this here?
1562 // (mpt_HardResetHandler has this check...)
1563 spin_lock_irqsave(&ioc->diagLock, flags);
1564 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
1565 spin_unlock_irqrestore(&ioc->diagLock, flags);
1566 return FAILED;
1567 }
1568 spin_unlock_irqrestore(&ioc->diagLock, flags);
1569
1570 /* Wait a fixed amount of time for the TM pending flag to be cleared.
1571 * If we time out and not bus reset, then we return a FAILED status
1572 * to the caller.
1573 * The call to mptscsih_tm_pending_wait() will set the pending flag
1574 * if we are
1575 * successful. Otherwise, reload the FW.
1576 */
1577 if (mptscsih_tm_pending_wait(hd) == FAILED) {
1578 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1579 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
1580 "Timed out waiting for last TM (%d) to complete! \n",
1581 ioc->name, hd->tmPending));
1582 return FAILED;
1583 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1584 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
1585 "reset: Timed out waiting for last TM (%d) "
1586 "to complete! \n", ioc->name,
1587 hd->tmPending));
1588 return FAILED;
1589 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
1590 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
1591 "Timed out waiting for last TM (%d) to complete! \n",
1592 ioc->name, hd->tmPending));
1593 return FAILED;
1594 }
1595 } else {
1596 spin_lock_irqsave(&ioc->FreeQlock, flags);
1597 hd->tmPending |= (1 << type);
1598 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1599 }
1600 1508
1509 issue_hard_reset = 0;
1601 ioc_raw_state = mpt_GetIocState(ioc, 0); 1510 ioc_raw_state = mpt_GetIocState(ioc, 0);
1602 1511
1603 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { 1512 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
1604 printk(MYIOC_s_WARN_FMT 1513 printk(MYIOC_s_WARN_FMT
1605 "TM Handler for type=%x: IOC Not operational (0x%x)!\n", 1514 "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
1606 ioc->name, type, ioc_raw_state); 1515 ioc->name, type, ioc_raw_state);
1607 printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name); 1516 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
1517 ioc->name, __func__);
1608 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) 1518 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
1609 printk(MYIOC_s_WARN_FMT "TMHandler: HardReset " 1519 printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
1610 "FAILED!!\n", ioc->name); 1520 "FAILED!!\n", ioc->name);
1611 return FAILED; 1521 return 0;
1612 } 1522 }
1613 1523
1614 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { 1524 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
1615 printk(MYIOC_s_WARN_FMT 1525 printk(MYIOC_s_WARN_FMT
1616 "TM Handler for type=%x: ioc_state: " 1526 "TaskMgmt type=%x: ioc_state: "
1617 "DOORBELL_ACTIVE (0x%x)!\n", 1527 "DOORBELL_ACTIVE (0x%x)!\n",
1618 ioc->name, type, ioc_raw_state); 1528 ioc->name, type, ioc_raw_state);
1619 return FAILED; 1529 return FAILED;
1620 } 1530 }
1621 1531
1622 /* Isse the Task Mgmt request. 1532 mutex_lock(&ioc->taskmgmt_cmds.mutex);
1623 */ 1533 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
1624 if (hd->hard_resets < -1) 1534 mf = NULL;
1625 hd->hard_resets++; 1535 retval = FAILED;
1626 1536 goto out;
1627 rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun, 1537 }
1628 ctx2abort, timeout);
1629 if (rc)
1630 printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
1631 ioc->name);
1632 else
1633 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
1634 ioc->name));
1635
1636 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1637 "TMHandler rc = %d!\n", ioc->name, rc));
1638
1639 return rc;
1640}
1641
1642
1643/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1644/**
1645 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1646 * @hd: Pointer to MPT_SCSI_HOST structure
1647 * @type: Task Management type
1648 * @channel: channel number for task management
1649 * @id: Logical Target ID for reset (if appropriate)
1650 * @lun: Logical Unit for reset (if appropriate)
1651 * @ctx2abort: Context for the task to be aborted (if appropriate)
1652 * @timeout: timeout for task management control
1653 *
1654 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1655 * or a non-interrupt thread. In the former, must not call schedule().
1656 *
1657 * Not all fields are meaningfull for all task types.
1658 *
1659 * Returns 0 for SUCCESS, or FAILED.
1660 *
1661 **/
1662static int
1663mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
1664{
1665 MPT_FRAME_HDR *mf;
1666 SCSITaskMgmt_t *pScsiTm;
1667 int ii;
1668 int retval;
1669 MPT_ADAPTER *ioc = hd->ioc;
1670 1538
1671 /* Return Fail to calling function if no message frames available. 1539 /* Return Fail to calling function if no message frames available.
1672 */ 1540 */
1673 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { 1541 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
1674 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1542 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1675 ioc->name)); 1543 "TaskMgmt no msg frames!!\n", ioc->name));
1676 return FAILED; 1544 retval = FAILED;
1545 mpt_clear_taskmgmt_in_progress_flag(ioc);
1546 goto out;
1677 } 1547 }
1678 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 1548 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1679 ioc->name, mf)); 1549 ioc->name, mf));
1680 1550
1681 /* Format the Request 1551 /* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1699 1569
1700 pScsiTm->TaskMsgContext = ctx2abort; 1570 pScsiTm->TaskMsgContext = ctx2abort;
1701 1571
1702 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " 1572 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
1703 "type=%d\n", ioc->name, ctx2abort, type)); 1573 "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
1574 type, timeout));
1704 1575
1705 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); 1576 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
1706 1577
1578 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1579 time_count = jiffies;
1707 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 1580 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
1708 (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 1581 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
1709 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1582 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1711 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, 1584 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
1712 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 1585 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
1713 if (retval) { 1586 if (retval) {
1714 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" 1587 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1715 " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, 1588 "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
1716 ioc, mf, retval)); 1589 ioc->name, mf, retval));
1717 goto fail_out; 1590 mpt_free_msg_frame(ioc, mf);
1591 mpt_clear_taskmgmt_in_progress_flag(ioc);
1592 goto out;
1718 } 1593 }
1719 } 1594 }
1720 1595
1721 if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { 1596 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
1722 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" 1597 timeout*HZ);
1723 " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, 1598 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
1724 ioc, mf)); 1599 retval = FAILED;
1725 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 1600 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
1726 ioc->name)); 1601 "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
1727 retval = mpt_HardResetHandler(ioc, CAN_SLEEP); 1602 mpt_clear_taskmgmt_in_progress_flag(ioc);
1728 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", 1603 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
1729 ioc->name, retval)); 1604 goto out;
1730 goto fail_out; 1605 issue_hard_reset = 1;
1606 goto out;
1731 } 1607 }
1732 1608
1733 /* 1609 retval = mptscsih_taskmgmt_reply(ioc, type,
1734 * Handle success case, see if theres a non-zero ioc_status. 1610 (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
1735 */
1736 if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
1737 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1738 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1739 retval = 0;
1740 else
1741 retval = FAILED;
1742 1611
1743 return retval; 1612 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1613 "TaskMgmt completed (%d seconds)\n",
1614 ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
1744 1615
1745 fail_out: 1616 out:
1746 1617
1747 /* 1618 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1748 * Free task management mf, and corresponding tm flags 1619 if (issue_hard_reset) {
1749 */ 1620 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
1750 mpt_free_msg_frame(ioc, mf); 1621 ioc->name, __func__);
1751 hd->tmPending = 0; 1622 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
1752 hd->tmState = TM_STATE_NONE; 1623 mpt_free_msg_frame(ioc, mf);
1753 return FAILED; 1624 }
1625
1626 retval = (retval == 0) ? 0 : FAILED;
1627 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
1628 return retval;
1754} 1629}
1630EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
1755 1631
1756static int 1632static int
1757mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) 1633mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1838 goto out; 1714 goto out;
1839 } 1715 }
1840 1716
1841 if (hd->resetPending) { 1717 if (ioc->timeouts < -1)
1842 retval = FAILED; 1718 ioc->timeouts++;
1843 goto out;
1844 }
1845
1846 if (hd->timeouts < -1)
1847 hd->timeouts++;
1848 1719
1849 if (mpt_fwfault_debug) 1720 if (mpt_fwfault_debug)
1850 mpt_halt_firmware(ioc); 1721 mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1861 1732
1862 hd->abortSCpnt = SCpnt; 1733 hd->abortSCpnt = SCpnt;
1863 1734
1864 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1735 retval = mptscsih_IssueTaskMgmt(hd,
1865 vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, 1736 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1866 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1737 vdevice->vtarget->channel,
1738 vdevice->vtarget->id, vdevice->lun,
1739 ctx2abort, mptscsih_get_tm_timeout(ioc));
1867 1740
1868 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && 1741 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
1869 SCpnt->serial_number == sn) 1742 SCpnt->serial_number == sn) {
1743 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1744 "task abort: command still in active list! (sc=%p)\n",
1745 ioc->name, SCpnt));
1870 retval = FAILED; 1746 retval = FAILED;
1747 } else {
1748 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1749 "task abort: command cleared from active list! (sc=%p)\n",
1750 ioc->name, SCpnt));
1751 retval = SUCCESS;
1752 }
1871 1753
1872 out: 1754 out:
1873 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1755 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
1874 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1756 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
1875 1757
1876 if (retval == 0) 1758 return retval;
1877 return SUCCESS;
1878 else
1879 return FAILED;
1880} 1759}
1881 1760
1882/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1761/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1909 ioc->name, SCpnt); 1788 ioc->name, SCpnt);
1910 scsi_print_command(SCpnt); 1789 scsi_print_command(SCpnt);
1911 1790
1912 if (hd->resetPending) {
1913 retval = FAILED;
1914 goto out;
1915 }
1916
1917 vdevice = SCpnt->device->hostdata; 1791 vdevice = SCpnt->device->hostdata;
1918 if (!vdevice || !vdevice->vtarget) { 1792 if (!vdevice || !vdevice->vtarget) {
1919 retval = 0; 1793 retval = SUCCESS;
1920 goto out; 1794 goto out;
1921 } 1795 }
1922 1796
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1927 goto out; 1801 goto out;
1928 } 1802 }
1929 1803
1930 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1804 retval = mptscsih_IssueTaskMgmt(hd,
1931 vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0, 1805 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1932 mptscsih_get_tm_timeout(ioc)); 1806 vdevice->vtarget->channel,
1807 vdevice->vtarget->id, 0, 0,
1808 mptscsih_get_tm_timeout(ioc));
1933 1809
1934 out: 1810 out:
1935 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n", 1811 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1972 ioc->name, SCpnt); 1848 ioc->name, SCpnt);
1973 scsi_print_command(SCpnt); 1849 scsi_print_command(SCpnt);
1974 1850
1975 if (hd->timeouts < -1) 1851 if (ioc->timeouts < -1)
1976 hd->timeouts++; 1852 ioc->timeouts++;
1977 1853
1978 vdevice = SCpnt->device->hostdata; 1854 vdevice = SCpnt->device->hostdata;
1979 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1855 if (!vdevice || !vdevice->vtarget)
1980 vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc)); 1856 return SUCCESS;
1857 retval = mptscsih_IssueTaskMgmt(hd,
1858 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1859 vdevice->vtarget->channel, 0, 0, 0,
1860 mptscsih_get_tm_timeout(ioc));
1981 1861
1982 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n", 1862 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
1983 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1863 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
2001mptscsih_host_reset(struct scsi_cmnd *SCpnt) 1881mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2002{ 1882{
2003 MPT_SCSI_HOST * hd; 1883 MPT_SCSI_HOST * hd;
2004 int retval; 1884 int status = SUCCESS;
2005 MPT_ADAPTER *ioc; 1885 MPT_ADAPTER *ioc;
1886 int retval;
2006 1887
2007 /* If we can't locate the host to reset, then we failed. */ 1888 /* If we can't locate the host to reset, then we failed. */
2008 if ((hd = shost_priv(SCpnt->device->host)) == NULL){ 1889 if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2021 /* If our attempts to reset the host failed, then return a failed 1902 /* If our attempts to reset the host failed, then return a failed
2022 * status. The host will be taken off line by the SCSI mid-layer. 1903 * status. The host will be taken off line by the SCSI mid-layer.
2023 */ 1904 */
2024 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { 1905 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
2025 retval = FAILED; 1906 if (retval < 0)
2026 } else { 1907 status = FAILED;
2027 /* Make sure TM pending is cleared and TM state is set to 1908 else
2028 * NONE. 1909 status = SUCCESS;
2029 */
2030 retval = 0;
2031 hd->tmPending = 0;
2032 hd->tmState = TM_STATE_NONE;
2033 }
2034 1910
2035 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n", 1911 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
2036 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1912 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
2037 1913
2038 return retval; 1914 return status;
2039} 1915}
2040 1916
2041/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2042/**
2043 * mptscsih_tm_pending_wait - wait for pending task management request to complete
2044 * @hd: Pointer to MPT host structure.
2045 *
2046 * Returns {SUCCESS,FAILED}.
2047 */
2048static int 1917static int
2049mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) 1918mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
1919 SCSITaskMgmtReply_t *pScsiTmReply)
2050{ 1920{
2051 unsigned long flags; 1921 u16 iocstatus;
2052 int loop_count = 4 * 10; /* Wait 10 seconds */ 1922 u32 termination_count;
2053 int status = FAILED; 1923 int retval;
2054 MPT_ADAPTER *ioc = hd->ioc;
2055 1924
2056 do { 1925 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
2057 spin_lock_irqsave(&ioc->FreeQlock, flags); 1926 retval = FAILED;
2058 if (hd->tmState == TM_STATE_NONE) { 1927 goto out;
2059 hd->tmState = TM_STATE_IN_PROGRESS; 1928 }
2060 hd->tmPending = 1;
2061 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2062 status = SUCCESS;
2063 break;
2064 }
2065 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2066 msleep(250);
2067 } while (--loop_count);
2068 1929
2069 return status; 1930 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2070}
2071 1931
2072/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1932 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2073/** 1933 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2074 * mptscsih_tm_wait_for_completion - wait for completion of TM task
2075 * @hd: Pointer to MPT host structure.
2076 * @timeout: timeout value
2077 *
2078 * Returns {SUCCESS,FAILED}.
2079 */
2080static int
2081mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
2082{
2083 unsigned long flags;
2084 int loop_count = 4 * timeout;
2085 int status = FAILED;
2086 MPT_ADAPTER *ioc = hd->ioc;
2087 1934
2088 do { 1935 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2089 spin_lock_irqsave(&ioc->FreeQlock, flags); 1936 "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
2090 if(hd->tmPending == 0) { 1937 "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
2091 status = SUCCESS; 1938 "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
2092 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 1939 pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
2093 break; 1940 le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
2094 } 1941 termination_count));
2095 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2096 msleep(250);
2097 } while (--loop_count);
2098 1942
2099 return status; 1943 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
1944 pScsiTmReply->ResponseCode)
1945 mptscsih_taskmgmt_response_code(ioc,
1946 pScsiTmReply->ResponseCode);
1947
1948 if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
1949 retval = 0;
1950 goto out;
1951 }
1952
1953 retval = FAILED;
1954 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1955 if (termination_count == 1)
1956 retval = 0;
1957 goto out;
1958 }
1959
1960 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1961 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1962 retval = 0;
1963
1964 out:
1965 return retval;
2100} 1966}
2101 1967
2102/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1968/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2103static void 1969void
2104mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) 1970mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2105{ 1971{
2106 char *desc; 1972 char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2134 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", 2000 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
2135 ioc->name, response_code, desc); 2001 ioc->name, response_code, desc);
2136} 2002}
2003EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
2137 2004
2138/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2005/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2139/** 2006/**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2150 * Returns 1 indicating alloc'd request frame ptr should be freed. 2017 * Returns 1 indicating alloc'd request frame ptr should be freed.
2151 **/ 2018 **/
2152int 2019int
2153mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2020mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
2021 MPT_FRAME_HDR *mr)
2154{ 2022{
2155 SCSITaskMgmtReply_t *pScsiTmReply; 2023 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2156 SCSITaskMgmt_t *pScsiTmReq; 2024 "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
2157 MPT_SCSI_HOST *hd;
2158 unsigned long flags;
2159 u16 iocstatus;
2160 u8 tmType;
2161 u32 termination_count;
2162
2163 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
2164 ioc->name, mf, mr));
2165 if (!ioc->sh) {
2166 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2167 "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
2168 return 1;
2169 }
2170
2171 if (mr == NULL) {
2172 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2173 "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
2174 return 1;
2175 }
2176
2177 hd = shost_priv(ioc->sh);
2178 pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
2179 pScsiTmReq = (SCSITaskMgmt_t*)mf;
2180 tmType = pScsiTmReq->TaskType;
2181 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2182 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2183 2025
2184 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && 2026 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2185 pScsiTmReply->ResponseCode)
2186 mptscsih_taskmgmt_response_code(ioc,
2187 pScsiTmReply->ResponseCode);
2188 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2189 2027
2190#ifdef CONFIG_FUSION_LOGGING 2028 if (!mr)
2191 if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
2192 (ioc->debug_level & MPT_DEBUG_TM ))
2193 printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
2194 "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
2195 "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
2196 pScsiTmReply->TargetID, pScsiTmReq->TaskType,
2197 le16_to_cpu(pScsiTmReply->IOCStatus),
2198 le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
2199 le32_to_cpu(pScsiTmReply->TerminationCount));
2200#endif
2201 if (!iocstatus) {
2202 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
2203 hd->abortSCpnt = NULL;
2204 goto out; 2029 goto out;
2205 }
2206
2207 /* Error? (anything non-zero?) */
2208
2209 /* clear flags and continue.
2210 */
2211 switch (tmType) {
2212
2213 case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2214 if (termination_count == 1)
2215 iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
2216 hd->abortSCpnt = NULL;
2217 break;
2218
2219 case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
2220
2221 /* If an internal command is present
2222 * or the TM failed - reload the FW.
2223 * FC FW may respond FAILED to an ABORT
2224 */
2225 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
2226 hd->cmdPtr)
2227 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
2228 printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
2229 break;
2230
2231 case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2232 default:
2233 break;
2234 }
2235 2030
2031 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2032 memcpy(ioc->taskmgmt_cmds.reply, mr,
2033 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
2236 out: 2034 out:
2237 spin_lock_irqsave(&ioc->FreeQlock, flags); 2035 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
2238 hd->tmPending = 0; 2036 mpt_clear_taskmgmt_in_progress_flag(ioc);
2239 hd->tmState = TM_STATE_NONE; 2037 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2240 hd->tm_iocstatus = iocstatus; 2038 complete(&ioc->taskmgmt_cmds.done);
2241 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2039 return 1;
2242 2040 }
2243 return 1; 2041 return 0;
2244} 2042}
2245 2043
2246/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2044/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2290,8 +2088,10 @@ int
2290mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) 2088mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2291{ 2089{
2292 struct inactive_raid_component_info *component_info; 2090 struct inactive_raid_component_info *component_info;
2293 int i; 2091 int i, j;
2092 RaidPhysDiskPage1_t *phys_disk;
2294 int rc = 0; 2093 int rc = 0;
2094 int num_paths;
2295 2095
2296 if (!ioc->raid_data.pIocPg3) 2096 if (!ioc->raid_data.pIocPg3)
2297 goto out; 2097 goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2303 } 2103 }
2304 } 2104 }
2305 2105
2106 if (ioc->bus_type != SAS)
2107 goto out;
2108
2109 /*
2110 * Check if dual path
2111 */
2112 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2113 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2114 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2115 if (num_paths < 2)
2116 continue;
2117 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2118 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2119 if (!phys_disk)
2120 continue;
2121 if ((mpt_raid_phys_disk_pg1(ioc,
2122 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2123 phys_disk))) {
2124 kfree(phys_disk);
2125 continue;
2126 }
2127 for (j = 0; j < num_paths; j++) {
2128 if ((phys_disk->Path[j].Flags &
2129 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2130 continue;
2131 if ((phys_disk->Path[j].Flags &
2132 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2133 continue;
2134 if ((id == phys_disk->Path[j].PhysDiskID) &&
2135 (channel == phys_disk->Path[j].PhysDiskBus)) {
2136 rc = 1;
2137 kfree(phys_disk);
2138 goto out;
2139 }
2140 }
2141 kfree(phys_disk);
2142 }
2143
2144
2306 /* 2145 /*
2307 * Check inactive list for matching phys disks 2146 * Check inactive list for matching phys disks
2308 */ 2147 */
@@ -2327,8 +2166,10 @@ u8
2327mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 2166mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2328{ 2167{
2329 struct inactive_raid_component_info *component_info; 2168 struct inactive_raid_component_info *component_info;
2330 int i; 2169 int i, j;
2170 RaidPhysDiskPage1_t *phys_disk;
2331 int rc = -ENXIO; 2171 int rc = -ENXIO;
2172 int num_paths;
2332 2173
2333 if (!ioc->raid_data.pIocPg3) 2174 if (!ioc->raid_data.pIocPg3)
2334 goto out; 2175 goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2340 } 2181 }
2341 } 2182 }
2342 2183
2184 if (ioc->bus_type != SAS)
2185 goto out;
2186
2187 /*
2188 * Check if dual path
2189 */
2190 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2191 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2192 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2193 if (num_paths < 2)
2194 continue;
2195 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2196 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2197 if (!phys_disk)
2198 continue;
2199 if ((mpt_raid_phys_disk_pg1(ioc,
2200 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2201 phys_disk))) {
2202 kfree(phys_disk);
2203 continue;
2204 }
2205 for (j = 0; j < num_paths; j++) {
2206 if ((phys_disk->Path[j].Flags &
2207 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2208 continue;
2209 if ((phys_disk->Path[j].Flags &
2210 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2211 continue;
2212 if ((id == phys_disk->Path[j].PhysDiskID) &&
2213 (channel == phys_disk->Path[j].PhysDiskBus)) {
2214 rc = phys_disk->PhysDiskNum;
2215 kfree(phys_disk);
2216 goto out;
2217 }
2218 }
2219 kfree(phys_disk);
2220 }
2221
2343 /* 2222 /*
2344 * Check inactive list for matching phys disks 2223 * Check inactive list for matching phys disks
2345 */ 2224 */
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2457 sdev->ppr, sdev->inquiry_len)); 2336 sdev->ppr, sdev->inquiry_len));
2458 2337
2459 vdevice->configured_lun = 1; 2338 vdevice->configured_lun = 1;
2460 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2461 2339
2462 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2340 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2463 "Queue depth=%d, tflags=%x\n", 2341 "Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2469 ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2347 ioc->name, vtarget->negoFlags, vtarget->maxOffset,
2470 vtarget->minSyncFactor)); 2348 vtarget->minSyncFactor));
2471 2349
2350 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2472 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2351 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2473 "tagged %d, simple %d, ordered %d\n", 2352 "tagged %d, simple %d, ordered %d\n",
2474 ioc->name,sdev->tagged_supported, sdev->simple_tags, 2353 ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2542} 2421}
2543 2422
2544/** 2423/**
2545 * mptscsih_get_scsi_lookup 2424 * mptscsih_get_scsi_lookup - retrieves scmd entry
2546 * @ioc: Pointer to MPT_ADAPTER structure 2425 * @ioc: Pointer to MPT_ADAPTER structure
2547 * @i: index into the array 2426 * @i: index into the array
2548 * 2427 *
2549 * retrieves scmd entry from ScsiLookup[] array list
2550 *
2551 * Returns the scsi_cmd pointer 2428 * Returns the scsi_cmd pointer
2552 **/ 2429 */
2553static struct scsi_cmnd * 2430struct scsi_cmnd *
2554mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) 2431mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2555{ 2432{
2556 unsigned long flags; 2433 unsigned long flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2562 2439
2563 return scmd; 2440 return scmd;
2564} 2441}
2442EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
2565 2443
2566/** 2444/**
2567 * mptscsih_getclear_scsi_lookup 2445 * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
2568 * @ioc: Pointer to MPT_ADAPTER structure 2446 * @ioc: Pointer to MPT_ADAPTER structure
2569 * @i: index into the array 2447 * @i: index into the array
2570 * 2448 *
2571 * retrieves and clears scmd entry from ScsiLookup[] array list
2572 *
2573 * Returns the scsi_cmd pointer 2449 * Returns the scsi_cmd pointer
2450 *
2574 **/ 2451 **/
2575static struct scsi_cmnd * 2452static struct scsi_cmnd *
2576mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) 2453mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
2635mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 2512mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2636{ 2513{
2637 MPT_SCSI_HOST *hd; 2514 MPT_SCSI_HOST *hd;
2638 unsigned long flags;
2639 2515
2640 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2641 ": IOC %s_reset routed to SCSI host driver!\n",
2642 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
2643 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
2644
2645 /* If a FW reload request arrives after base installed but
2646 * before all scsi hosts have been attached, then an alt_ioc
2647 * may have a NULL sh pointer.
2648 */
2649 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) 2516 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
2650 return 0; 2517 return 0;
2651 else
2652 hd = shost_priv(ioc->sh);
2653
2654 if (reset_phase == MPT_IOC_SETUP_RESET) {
2655 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
2656
2657 /* Clean Up:
2658 * 1. Set Hard Reset Pending Flag
2659 * All new commands go to doneQ
2660 */
2661 hd->resetPending = 1;
2662
2663 } else if (reset_phase == MPT_IOC_PRE_RESET) {
2664 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
2665 2518
2666 /* 2. Flush running commands 2519 hd = shost_priv(ioc->sh);
2667 * Clean ScsiLookup (and associated memory) 2520 switch (reset_phase) {
2668 * AND clean mytaskQ 2521 case MPT_IOC_SETUP_RESET:
2669 */ 2522 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2670 2523 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
2671 /* 2b. Reply to OS all known outstanding I/O commands. 2524 break;
2672 */ 2525 case MPT_IOC_PRE_RESET:
2526 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2527 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
2673 mptscsih_flush_running_cmds(hd); 2528 mptscsih_flush_running_cmds(hd);
2674 2529 break;
2675 /* 2c. If there was an internal command that 2530 case MPT_IOC_POST_RESET:
2676 * has not completed, configuration or io request, 2531 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2677 * free these resources. 2532 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
2678 */ 2533 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
2679 if (hd->cmdPtr) { 2534 ioc->internal_cmds.status |=
2680 del_timer(&hd->timer); 2535 MPT_MGMT_STATUS_DID_IOCRESET;
2681 mpt_free_msg_frame(ioc, hd->cmdPtr); 2536 complete(&ioc->internal_cmds.done);
2682 }
2683
2684 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
2685
2686 } else {
2687 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
2688
2689 /* Once a FW reload begins, all new OS commands are
2690 * redirected to the doneQ w/ a reset status.
2691 * Init all control structures.
2692 */
2693
2694 /* 2. Chain Buffer initialization
2695 */
2696
2697 /* 4. Renegotiate to all devices, if SPI
2698 */
2699
2700 /* 5. Enable new commands to be posted
2701 */
2702 spin_lock_irqsave(&ioc->FreeQlock, flags);
2703 hd->tmPending = 0;
2704 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2705 hd->resetPending = 0;
2706 hd->tmState = TM_STATE_NONE;
2707
2708 /* 6. If there was an internal command,
2709 * wake this process up.
2710 */
2711 if (hd->cmdPtr) {
2712 /*
2713 * Wake up the original calling thread
2714 */
2715 hd->pLocal = &hd->localReply;
2716 hd->pLocal->completion = MPT_SCANDV_DID_RESET;
2717 hd->scandv_wait_done = 1;
2718 wake_up(&hd->scandv_waitq);
2719 hd->cmdPtr = NULL;
2720 } 2537 }
2721 2538 break;
2722 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name)); 2539 default:
2723 2540 break;
2724 } 2541 }
2725
2726 return 1; /* currently means nothing really */ 2542 return 1; /* currently means nothing really */
2727} 2543}
2728 2544
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2730int 2546int
2731mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2547mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2732{ 2548{
2733 MPT_SCSI_HOST *hd;
2734 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; 2549 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2735 2550
2736 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2551 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2737 ioc->name, event)); 2552 "MPT event (=%02Xh) routed to SCSI host driver!\n",
2738 2553 ioc->name, event));
2739 if (ioc->sh == NULL ||
2740 ((hd = shost_priv(ioc->sh)) == NULL))
2741 return 1;
2742
2743 switch (event) {
2744 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2745 /* FIXME! */
2746 break;
2747 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2748 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2749 if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
2750 hd->soft_resets++;
2751 break;
2752 case MPI_EVENT_LOGOUT: /* 09 */
2753 /* FIXME! */
2754 break;
2755
2756 case MPI_EVENT_RESCAN: /* 06 */
2757 break;
2758
2759 /*
2760 * CHECKME! Don't think we need to do
2761 * anything for these, but...
2762 */
2763 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
2764 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
2765 /*
2766 * CHECKME! Falling thru...
2767 */
2768 break;
2769
2770 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2771 break;
2772 2554
2773 case MPI_EVENT_NONE: /* 00 */ 2555 if ((event == MPI_EVENT_IOC_BUS_RESET ||
2774 case MPI_EVENT_LOG_DATA: /* 01 */ 2556 event == MPI_EVENT_EXT_BUS_RESET) &&
2775 case MPI_EVENT_STATE_CHANGE: /* 02 */ 2557 (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
2776 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 2558 ioc->soft_resets++;
2777 default:
2778 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
2779 ioc->name, event));
2780 break;
2781 }
2782 2559
2783 return 1; /* currently means nothing really */ 2560 return 1; /* currently means nothing really */
2784} 2561}
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2809 * Used ONLY for DV and other internal commands. 2586 * Used ONLY for DV and other internal commands.
2810 */ 2587 */
2811int 2588int
2812mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2589mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2590 MPT_FRAME_HDR *reply)
2813{ 2591{
2814 MPT_SCSI_HOST *hd;
2815 SCSIIORequest_t *pReq; 2592 SCSIIORequest_t *pReq;
2816 int completionCode; 2593 SCSIIOReply_t *pReply;
2594 u8 cmd;
2817 u16 req_idx; 2595 u16 req_idx;
2596 u8 *sense_data;
2597 int sz;
2818 2598
2819 hd = shost_priv(ioc->sh); 2599 ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2820 2600 ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
2821 if ((mf == NULL) || 2601 if (!reply)
2822 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { 2602 goto out;
2823 printk(MYIOC_s_ERR_FMT
2824 "ScanDvComplete, %s req frame ptr! (=%p)\n",
2825 ioc->name, mf?"BAD":"NULL", (void *) mf);
2826 goto wakeup;
2827 }
2828
2829 del_timer(&hd->timer);
2830 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
2831 mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
2832 pReq = (SCSIIORequest_t *) mf;
2833 2603
2834 if (mf != hd->cmdPtr) { 2604 pReply = (SCSIIOReply_t *) reply;
2835 printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", 2605 pReq = (SCSIIORequest_t *) req;
2836 ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); 2606 ioc->internal_cmds.completion_code =
2607 mptscsih_get_completion_code(ioc, req, reply);
2608 ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2609 memcpy(ioc->internal_cmds.reply, reply,
2610 min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
2611 cmd = reply->u.hdr.Function;
2612 if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
2613 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
2614 (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
2615 req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
2616 sense_data = ((u8 *)ioc->sense_buf_pool +
2617 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2618 sz = min_t(int, pReq->SenseBufferLength,
2619 MPT_SENSE_BUFFER_ALLOC);
2620 memcpy(ioc->internal_cmds.sense, sense_data, sz);
2837 } 2621 }
2838 hd->cmdPtr = NULL; 2622 out:
2839 2623 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
2840 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", 2624 return 0;
2841 ioc->name, mf, mr, req_idx)); 2625 ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2842 2626 complete(&ioc->internal_cmds.done);
2843 hd->pLocal = &hd->localReply;
2844 hd->pLocal->scsiStatus = 0;
2845
2846 /* If target struct exists, clear sense valid flag.
2847 */
2848 if (mr == NULL) {
2849 completionCode = MPT_SCANDV_GOOD;
2850 } else {
2851 SCSIIOReply_t *pReply;
2852 u16 status;
2853 u8 scsi_status;
2854
2855 pReply = (SCSIIOReply_t *) mr;
2856
2857 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2858 scsi_status = pReply->SCSIStatus;
2859
2860
2861 switch(status) {
2862
2863 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2864 completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
2865 break;
2866
2867 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2868 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2869 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2870 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2871 completionCode = MPT_SCANDV_DID_RESET;
2872 break;
2873
2874 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2875 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2876 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2877 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2878 ConfigReply_t *pr = (ConfigReply_t *)mr;
2879 completionCode = MPT_SCANDV_GOOD;
2880 hd->pLocal->header.PageVersion = pr->Header.PageVersion;
2881 hd->pLocal->header.PageLength = pr->Header.PageLength;
2882 hd->pLocal->header.PageNumber = pr->Header.PageNumber;
2883 hd->pLocal->header.PageType = pr->Header.PageType;
2884
2885 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2886 /* If the RAID Volume request is successful,
2887 * return GOOD, else indicate that
2888 * some type of error occurred.
2889 */
2890 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
2891 if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
2892 completionCode = MPT_SCANDV_GOOD;
2893 else
2894 completionCode = MPT_SCANDV_SOME_ERROR;
2895 memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
2896
2897 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
2898 u8 *sense_data;
2899 int sz;
2900
2901 /* save sense data in global structure
2902 */
2903 completionCode = MPT_SCANDV_SENSE;
2904 hd->pLocal->scsiStatus = scsi_status;
2905 sense_data = ((u8 *)ioc->sense_buf_pool +
2906 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2907
2908 sz = min_t(int, pReq->SenseBufferLength,
2909 SCSI_STD_SENSE_BYTES);
2910 memcpy(hd->pLocal->sense, sense_data, sz);
2911
2912 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n",
2913 ioc->name, sense_data));
2914 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2915 if (pReq->CDB[0] == INQUIRY)
2916 completionCode = MPT_SCANDV_ISSUE_SENSE;
2917 else
2918 completionCode = MPT_SCANDV_DID_RESET;
2919 }
2920 else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2921 completionCode = MPT_SCANDV_DID_RESET;
2922 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2923 completionCode = MPT_SCANDV_DID_RESET;
2924 else {
2925 completionCode = MPT_SCANDV_GOOD;
2926 hd->pLocal->scsiStatus = scsi_status;
2927 }
2928 break;
2929
2930 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2931 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2932 completionCode = MPT_SCANDV_DID_RESET;
2933 else
2934 completionCode = MPT_SCANDV_SOME_ERROR;
2935 break;
2936
2937 default:
2938 completionCode = MPT_SCANDV_SOME_ERROR;
2939 break;
2940
2941 } /* switch(status) */
2942
2943 } /* end of address reply case */
2944
2945 hd->pLocal->completion = completionCode;
2946
2947 /* MF and RF are freed in mpt_interrupt
2948 */
2949wakeup:
2950 /* Free Chain buffers (will never chain) in scan or dv */
2951 //mptscsih_freeChainBuffers(ioc, req_idx);
2952
2953 /*
2954 * Wake up the original calling thread
2955 */
2956 hd->scandv_wait_done = 1;
2957 wake_up(&hd->scandv_waitq);
2958
2959 return 1; 2627 return 1;
2960} 2628}
2961 2629
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
3004 return; 2672 return;
3005} 2673}
3006 2674
2675/**
2676 * mptscsih_get_completion_code -
2677 * @ioc: Pointer to MPT_ADAPTER structure
2678 * @reply:
2679 * @cmd:
2680 *
2681 **/
2682static int
2683mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2684 MPT_FRAME_HDR *reply)
2685{
2686 SCSIIOReply_t *pReply;
2687 MpiRaidActionReply_t *pr;
2688 u8 scsi_status;
2689 u16 status;
2690 int completion_code;
2691
2692 pReply = (SCSIIOReply_t *)reply;
2693 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2694 scsi_status = pReply->SCSIStatus;
2695
2696 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2697 "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
2698 "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
2699 scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
2700
2701 switch (status) {
2702
2703 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2704 completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
2705 break;
2706
2707 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2708 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2709 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2710 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2711 completion_code = MPT_SCANDV_DID_RESET;
2712 break;
2713
2714 case MPI_IOCSTATUS_BUSY:
2715 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2716 completion_code = MPT_SCANDV_BUSY;
2717 break;
2718
2719 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2720 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2721 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2722 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2723 completion_code = MPT_SCANDV_GOOD;
2724 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2725 pr = (MpiRaidActionReply_t *)reply;
2726 if (le16_to_cpu(pr->ActionStatus) ==
2727 MPI_RAID_ACTION_ASTATUS_SUCCESS)
2728 completion_code = MPT_SCANDV_GOOD;
2729 else
2730 completion_code = MPT_SCANDV_SOME_ERROR;
2731 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
2732 completion_code = MPT_SCANDV_SENSE;
2733 else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2734 if (req->u.scsireq.CDB[0] == INQUIRY)
2735 completion_code = MPT_SCANDV_ISSUE_SENSE;
2736 else
2737 completion_code = MPT_SCANDV_DID_RESET;
2738 } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2739 completion_code = MPT_SCANDV_DID_RESET;
2740 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2741 completion_code = MPT_SCANDV_DID_RESET;
2742 else if (scsi_status == MPI_SCSI_STATUS_BUSY)
2743 completion_code = MPT_SCANDV_BUSY;
2744 else
2745 completion_code = MPT_SCANDV_GOOD;
2746 break;
2747
2748 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2749 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2750 completion_code = MPT_SCANDV_DID_RESET;
2751 else
2752 completion_code = MPT_SCANDV_SOME_ERROR;
2753 break;
2754 default:
2755 completion_code = MPT_SCANDV_SOME_ERROR;
2756 break;
2757
2758 } /* switch(status) */
2759
2760 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2761 " completionCode set to %08xh\n", ioc->name, completion_code));
2762 return completion_code;
2763}
3007 2764
3008/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2765/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3009/** 2766/**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3030{ 2787{
3031 MPT_FRAME_HDR *mf; 2788 MPT_FRAME_HDR *mf;
3032 SCSIIORequest_t *pScsiReq; 2789 SCSIIORequest_t *pScsiReq;
3033 SCSIIORequest_t ReqCopy;
3034 int my_idx, ii, dir; 2790 int my_idx, ii, dir;
3035 int rc, cmdTimeout; 2791 int timeout;
3036 int in_isr;
3037 char cmdLen; 2792 char cmdLen;
3038 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; 2793 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3039 char cmd = io->cmd; 2794 u8 cmd = io->cmd;
3040 MPT_ADAPTER *ioc = hd->ioc; 2795 MPT_ADAPTER *ioc = hd->ioc;
2796 int ret = 0;
2797 unsigned long timeleft;
2798 unsigned long flags;
3041 2799
3042 in_isr = in_interrupt(); 2800 /* don't send internal command during diag reset */
3043 if (in_isr) { 2801 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3044 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", 2802 if (ioc->ioc_reset_in_progress) {
3045 ioc->name)); 2803 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3046 return -EPERM; 2804 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2805 "%s: busy with host reset\n", ioc->name, __func__));
2806 return MPT_SCANDV_BUSY;
3047 } 2807 }
2808 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3048 2809
2810 mutex_lock(&ioc->internal_cmds.mutex);
3049 2811
3050 /* Set command specific information 2812 /* Set command specific information
3051 */ 2813 */
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3055 dir = MPI_SCSIIO_CONTROL_READ; 2817 dir = MPI_SCSIIO_CONTROL_READ;
3056 CDB[0] = cmd; 2818 CDB[0] = cmd;
3057 CDB[4] = io->size; 2819 CDB[4] = io->size;
3058 cmdTimeout = 10; 2820 timeout = 10;
3059 break; 2821 break;
3060 2822
3061 case TEST_UNIT_READY: 2823 case TEST_UNIT_READY:
3062 cmdLen = 6; 2824 cmdLen = 6;
3063 dir = MPI_SCSIIO_CONTROL_READ; 2825 dir = MPI_SCSIIO_CONTROL_READ;
3064 cmdTimeout = 10; 2826 timeout = 10;
3065 break; 2827 break;
3066 2828
3067 case START_STOP: 2829 case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3069 dir = MPI_SCSIIO_CONTROL_READ; 2831 dir = MPI_SCSIIO_CONTROL_READ;
3070 CDB[0] = cmd; 2832 CDB[0] = cmd;
3071 CDB[4] = 1; /*Spin up the disk */ 2833 CDB[4] = 1; /*Spin up the disk */
3072 cmdTimeout = 15; 2834 timeout = 15;
3073 break; 2835 break;
3074 2836
3075 case REQUEST_SENSE: 2837 case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3077 CDB[0] = cmd; 2839 CDB[0] = cmd;
3078 CDB[4] = io->size; 2840 CDB[4] = io->size;
3079 dir = MPI_SCSIIO_CONTROL_READ; 2841 dir = MPI_SCSIIO_CONTROL_READ;
3080 cmdTimeout = 10; 2842 timeout = 10;
3081 break; 2843 break;
3082 2844
3083 case READ_BUFFER: 2845 case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3096 CDB[6] = (io->size >> 16) & 0xFF; 2858 CDB[6] = (io->size >> 16) & 0xFF;
3097 CDB[7] = (io->size >> 8) & 0xFF; 2859 CDB[7] = (io->size >> 8) & 0xFF;
3098 CDB[8] = io->size & 0xFF; 2860 CDB[8] = io->size & 0xFF;
3099 cmdTimeout = 10; 2861 timeout = 10;
3100 break; 2862 break;
3101 2863
3102 case WRITE_BUFFER: 2864 case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3111 CDB[6] = (io->size >> 16) & 0xFF; 2873 CDB[6] = (io->size >> 16) & 0xFF;
3112 CDB[7] = (io->size >> 8) & 0xFF; 2874 CDB[7] = (io->size >> 8) & 0xFF;
3113 CDB[8] = io->size & 0xFF; 2875 CDB[8] = io->size & 0xFF;
3114 cmdTimeout = 10; 2876 timeout = 10;
3115 break; 2877 break;
3116 2878
3117 case RESERVE: 2879 case RESERVE:
3118 cmdLen = 6; 2880 cmdLen = 6;
3119 dir = MPI_SCSIIO_CONTROL_READ; 2881 dir = MPI_SCSIIO_CONTROL_READ;
3120 CDB[0] = cmd; 2882 CDB[0] = cmd;
3121 cmdTimeout = 10; 2883 timeout = 10;
3122 break; 2884 break;
3123 2885
3124 case RELEASE: 2886 case RELEASE:
3125 cmdLen = 6; 2887 cmdLen = 6;
3126 dir = MPI_SCSIIO_CONTROL_READ; 2888 dir = MPI_SCSIIO_CONTROL_READ;
3127 CDB[0] = cmd; 2889 CDB[0] = cmd;
3128 cmdTimeout = 10; 2890 timeout = 10;
3129 break; 2891 break;
3130 2892
3131 case SYNCHRONIZE_CACHE: 2893 case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3133 dir = MPI_SCSIIO_CONTROL_READ; 2895 dir = MPI_SCSIIO_CONTROL_READ;
3134 CDB[0] = cmd; 2896 CDB[0] = cmd;
3135// CDB[1] = 0x02; /* set immediate bit */ 2897// CDB[1] = 0x02; /* set immediate bit */
3136 cmdTimeout = 10; 2898 timeout = 10;
3137 break; 2899 break;
3138 2900
3139 default: 2901 default:
3140 /* Error Case */ 2902 /* Error Case */
3141 return -EFAULT; 2903 ret = -EFAULT;
2904 goto out;
3142 } 2905 }
3143 2906
3144 /* Get and Populate a free Frame 2907 /* Get and Populate a free Frame
2908 * MsgContext set in mpt_get_msg_frame call
3145 */ 2909 */
3146 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 2910 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
3147 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", 2911 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
3148 ioc->name)); 2912 ioc->name, __func__));
3149 return -EBUSY; 2913 ret = MPT_SCANDV_BUSY;
2914 goto out;
3150 } 2915 }
3151 2916
3152 pScsiReq = (SCSIIORequest_t *) mf; 2917 pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3172 2937
3173 pScsiReq->Reserved = 0; 2938 pScsiReq->Reserved = 0;
3174 2939
3175 pScsiReq->MsgFlags = mpt_msg_flags(); 2940 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
3176 /* MsgContext set in mpt_get_msg_fram call */ 2941 /* MsgContext set in mpt_get_msg_fram call */
3177 2942
3178 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN); 2943 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3184 2949
3185 if (cmd == REQUEST_SENSE) { 2950 if (cmd == REQUEST_SENSE) {
3186 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); 2951 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
3187 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n", 2952 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3188 ioc->name, cmd)); 2953 "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
3189 } 2954 }
3190 2955
3191 for (ii=0; ii < 16; ii++) 2956 for (ii = 0; ii < 16; ii++)
3192 pScsiReq->CDB[ii] = CDB[ii]; 2957 pScsiReq->CDB[ii] = CDB[ii];
3193 2958
3194 pScsiReq->DataLength = cpu_to_le32(io->size); 2959 pScsiReq->DataLength = cpu_to_le32(io->size);
3195 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma 2960 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
3196 + (my_idx * MPT_SENSE_BUFFER_ALLOC)); 2961 + (my_idx * MPT_SENSE_BUFFER_ALLOC));
3197 2962
3198 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", 2963 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3199 ioc->name, cmd, io->channel, io->id, io->lun)); 2964 "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
2965 ioc->name, __func__, cmd, io->channel, io->id, io->lun));
3200 2966
3201 if (dir == MPI_SCSIIO_CONTROL_READ) { 2967 if (dir == MPI_SCSIIO_CONTROL_READ)
3202 mpt_add_sge((char *) &pScsiReq->SGL, 2968 ioc->add_sge((char *) &pScsiReq->SGL,
3203 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, 2969 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
3204 io->data_dma); 2970 else
3205 } else { 2971 ioc->add_sge((char *) &pScsiReq->SGL,
3206 mpt_add_sge((char *) &pScsiReq->SGL, 2972 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
3207 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
3208 io->data_dma);
3209 }
3210
3211 /* The ISR will free the request frame, but we need
3212 * the information to initialize the target. Duplicate.
3213 */
3214 memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
3215
3216 /* Issue this command after:
3217 * finish init
3218 * add timer
3219 * Wait until the reply has been received
3220 * ScsiScanDvCtx callback function will
3221 * set hd->pLocal;
3222 * set scandv_wait_done and call wake_up
3223 */
3224 hd->pLocal = NULL;
3225 hd->timer.expires = jiffies + HZ*cmdTimeout;
3226 hd->scandv_wait_done = 0;
3227
3228 /* Save cmd pointer, for resource free if timeout or
3229 * FW reload occurs
3230 */
3231 hd->cmdPtr = mf;
3232 2973
3233 add_timer(&hd->timer); 2974 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
3234 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 2975 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
3235 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 2976 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
3236 2977 timeout*HZ);
3237 if (hd->pLocal) { 2978 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
3238 rc = hd->pLocal->completion; 2979 ret = MPT_SCANDV_DID_RESET;
3239 hd->pLocal->skip = 0; 2980 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3240 2981 "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
3241 /* Always set fatal error codes in some cases. 2982 cmd));
3242 */ 2983 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
3243 if (rc == MPT_SCANDV_SELECTION_TIMEOUT) 2984 mpt_free_msg_frame(ioc, mf);
3244 rc = -ENXIO; 2985 goto out;
3245 else if (rc == MPT_SCANDV_SOME_ERROR) 2986 }
3246 rc = -rc; 2987 if (!timeleft) {
3247 } else { 2988 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
3248 rc = -EFAULT; 2989 ioc->name, __func__);
3249 /* This should never happen. */ 2990 mpt_HardResetHandler(ioc, CAN_SLEEP);
3250 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", 2991 mpt_free_msg_frame(ioc, mf);
3251 ioc->name)); 2992 }
2993 goto out;
3252 } 2994 }
3253 2995
3254 return rc; 2996 ret = ioc->internal_cmds.completion_code;
2997 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
2998 ioc->name, __func__, ret));
2999
3000 out:
3001 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
3002 mutex_unlock(&ioc->internal_cmds.mutex);
3003 return ret;
3255} 3004}
3256 3005
3257/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3006/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
3491 &dev_attr_debug_level, 3240 &dev_attr_debug_level,
3492 NULL, 3241 NULL,
3493}; 3242};
3243
3494EXPORT_SYMBOL(mptscsih_host_attrs); 3244EXPORT_SYMBOL(mptscsih_host_attrs);
3495 3245
3496EXPORT_SYMBOL(mptscsih_remove); 3246EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
3516EXPORT_SYMBOL(mptscsih_ioc_reset); 3266EXPORT_SYMBOL(mptscsih_ioc_reset);
3517EXPORT_SYMBOL(mptscsih_change_queue_depth); 3267EXPORT_SYMBOL(mptscsih_change_queue_depth);
3518EXPORT_SYMBOL(mptscsih_timer_expired); 3268EXPORT_SYMBOL(mptscsih_timer_expired);
3519EXPORT_SYMBOL(mptscsih_TMHandler);
3520 3269
3521/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3270/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 319aa3033371..eb3f677528ac 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -60,6 +60,7 @@
60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) 60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
61#define MPT_SCANDV_ISSUE_SENSE (0x00000010) 61#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
62#define MPT_SCANDV_FALLBACK (0x00000020) 62#define MPT_SCANDV_FALLBACK (0x00000020)
63#define MPT_SCANDV_BUSY (0x00000040)
63 64
64#define MPT_SCANDV_MAX_RETRIES (10) 65#define MPT_SCANDV_MAX_RETRIES (10)
65 66
@@ -89,6 +90,7 @@
89 90
90#endif 91#endif
91 92
93
92typedef struct _internal_cmd { 94typedef struct _internal_cmd {
93 char *data; /* data pointer */ 95 char *data; /* data pointer */
94 dma_addr_t data_dma; /* data dma address */ 96 dma_addr_t data_dma; /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
112extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); 114extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
113extern const char * mptscsih_info(struct Scsi_Host *SChost); 115extern const char * mptscsih_info(struct Scsi_Host *SChost);
114extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); 116extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
117extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
118 u8 id, int lun, int ctx2abort, ulong timeout);
115extern void mptscsih_slave_destroy(struct scsi_device *device); 119extern void mptscsih_slave_destroy(struct scsi_device *device);
116extern int mptscsih_slave_configure(struct scsi_device *device); 120extern int mptscsih_slave_configure(struct scsi_device *device);
117extern int mptscsih_abort(struct scsi_cmnd * SCpnt); 121extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
126extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
127extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); 131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
128extern void mptscsih_timer_expired(unsigned long data); 132extern void mptscsih_timer_expired(unsigned long data);
129extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
130extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 133extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
131extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 134extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
132extern struct device_attribute *mptscsih_host_attrs[]; 135extern struct device_attribute *mptscsih_host_attrs[];
136extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
137extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 61620144e49c..c5b808fd55ba 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | 300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
301 (IOCPage4Ptr->Header.PageLength + ii) * 4; 301 (IOCPage4Ptr->Header.PageLength + ii) * 4;
302 302
303 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); 303 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
304 304
305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", 306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0; 614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
615} 615}
616 616
617static int 617int
618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) 618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
619{ 619{
620 MPT_ADAPTER *ioc = hd->ioc;
620 MpiRaidActionRequest_t *pReq; 621 MpiRaidActionRequest_t *pReq;
621 MPT_FRAME_HDR *mf; 622 MPT_FRAME_HDR *mf;
622 MPT_ADAPTER *ioc = hd->ioc; 623 int ret;
624 unsigned long timeleft;
625
626 mutex_lock(&ioc->internal_cmds.mutex);
623 627
624 /* Get and Populate a free Frame 628 /* Get and Populate a free Frame
625 */ 629 */
626 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 630 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
627 ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", 631 dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
628 ioc->name)); 632 "%s: no msg frames!\n", ioc->name, __func__));
629 return -EAGAIN; 633 ret = -EAGAIN;
634 goto out;
630 } 635 }
631 pReq = (MpiRaidActionRequest_t *)mf; 636 pReq = (MpiRaidActionRequest_t *)mf;
632 if (quiesce) 637 if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
643 pReq->Reserved2 = 0; 648 pReq->Reserved2 = 0;
644 pReq->ActionDataWord = 0; /* Reserved for this action */ 649 pReq->ActionDataWord = 0; /* Reserved for this action */
645 650
646 mpt_add_sge((char *)&pReq->ActionDataSGE, 651 ioc->add_sge((char *)&pReq->ActionDataSGE,
647 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); 652 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
648 653
649 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", 654 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
650 ioc->name, pReq->Action, channel, id)); 655 ioc->name, pReq->Action, channel, id));
651 656
652 hd->pLocal = NULL; 657 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
653 hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
654 hd->scandv_wait_done = 0;
655
656 /* Save cmd pointer, for resource free if timeout or
657 * FW reload occurs
658 */
659 hd->cmdPtr = mf;
660
661 add_timer(&hd->timer);
662 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 658 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
663 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 659 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
660 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
661 ret = -ETIME;
662 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
663 ioc->name, __func__));
664 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
665 goto out;
666 if (!timeleft) {
667 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
668 ioc->name, __func__);
669 mpt_HardResetHandler(ioc, CAN_SLEEP);
670 mpt_free_msg_frame(ioc, mf);
671 }
672 goto out;
673 }
664 674
665 if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0)) 675 ret = ioc->internal_cmds.completion_code;
666 return -1;
667 676
668 return 0; 677 out:
678 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
679 mutex_unlock(&ioc->internal_cmds.mutex);
680 return ret;
669} 681}
670 682
671static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, 683static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1423 * A slightly different algorithm is required for 1435 * A slightly different algorithm is required for
1424 * 64bit SGEs. 1436 * 64bit SGEs.
1425 */ 1437 */
1426 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1438 scale = ioc->req_sz/ioc->SGE_size;
1427 if (sizeof(dma_addr_t) == sizeof(u64)) { 1439 if (ioc->sg_addr_size == sizeof(u64)) {
1428 numSGE = (scale - 1) * 1440 numSGE = (scale - 1) *
1429 (ioc->facts.MaxChainDepth-1) + scale + 1441 (ioc->facts.MaxChainDepth-1) + scale +
1430 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1442 (ioc->req_sz - 60) / ioc->SGE_size;
1431 sizeof(u32));
1432 } else { 1443 } else {
1433 numSGE = 1 + (scale - 1) * 1444 numSGE = 1 + (scale - 1) *
1434 (ioc->facts.MaxChainDepth-1) + scale + 1445 (ioc->facts.MaxChainDepth-1) + scale +
1435 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1446 (ioc->req_sz - 64) / ioc->SGE_size;
1436 sizeof(u32));
1437 } 1447 }
1438 1448
1439 if (numSGE < sh->sg_tablesize) { 1449 if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1464 1474
1465 /* Clear the TM flags 1475 /* Clear the TM flags
1466 */ 1476 */
1467 hd->tmPending = 0;
1468 hd->tmState = TM_STATE_NONE;
1469 hd->resetPending = 0;
1470 hd->abortSCpnt = NULL; 1477 hd->abortSCpnt = NULL;
1471 1478
1472 /* Clear the pointer used to store 1479 /* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1493 mpt_saf_te)); 1500 mpt_saf_te));
1494 ioc->spi_data.noQas = 0; 1501 ioc->spi_data.noQas = 0;
1495 1502
1496 init_waitqueue_head(&hd->scandv_waitq);
1497 hd->scandv_wait_done = 0;
1498 hd->last_queue_full = 0; 1503 hd->last_queue_full = 0;
1499 hd->spi_pending = 0; 1504 hd->spi_pending = 0;
1500 1505
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1514 * issue internal bus reset 1519 * issue internal bus reset
1515 */ 1520 */
1516 if (ioc->spi_data.bus_reset) 1521 if (ioc->spi_data.bus_reset)
1517 mptscsih_TMHandler(hd, 1522 mptscsih_IssueTaskMgmt(hd,
1518 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1523 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1519 0, 0, 0, 0, 5); 1524 0, 0, 0, 0, 5);
1520 1525
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d1ef75..f3c4a3b910bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2264,6 +2264,17 @@ config BNX2
2264 To compile this driver as a module, choose M here: the module 2264 To compile this driver as a module, choose M here: the module
2265 will be called bnx2. This is recommended. 2265 will be called bnx2. This is recommended.
2266 2266
2267config CNIC
2268 tristate "Broadcom CNIC support"
2269 depends on BNX2
2270 depends on UIO
2271 help
2272 This driver supports offload features of Broadcom NetXtremeII
2273 gigabit Ethernet cards.
2274
2275 To compile this driver as a module, choose M here: the module
2276 will be called cnic. This is recommended.
2277
2267config SPIDER_NET 2278config SPIDER_NET
2268 tristate "Spider Gigabit Ethernet driver" 2279 tristate "Spider Gigabit Ethernet driver"
2269 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) 2280 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a1c25cb4669f..db30ebd7b262 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
73obj-$(CONFIG_FEALNX) += fealnx.o 73obj-$(CONFIG_FEALNX) += fealnx.o
74obj-$(CONFIG_TIGON3) += tg3.o 74obj-$(CONFIG_TIGON3) += tg3.o
75obj-$(CONFIG_BNX2) += bnx2.o 75obj-$(CONFIG_BNX2) += bnx2.o
76obj-$(CONFIG_CNIC) += cnic.o
76obj-$(CONFIG_BNX2X) += bnx2x.o 77obj-$(CONFIG_BNX2X) += bnx2x.o
77bnx2x-objs := bnx2x_main.o bnx2x_link.o 78bnx2x-objs := bnx2x_main.o bnx2x_link.o
78spidernet-y += spider_net.o spider_net_ethtool.o 79spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b0cb29d4cc01..3f5fcb0156a1 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,10 @@
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51 51
52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53#define BCM_CNIC 1
54#include "cnic_if.h"
55#endif
52#include "bnx2.h" 56#include "bnx2.h"
53#include "bnx2_fw.h" 57#include "bnx2_fw.h"
54 58
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
315 spin_unlock_bh(&bp->indirect_lock); 319 spin_unlock_bh(&bp->indirect_lock);
316} 320}
317 321
322#ifdef BCM_CNIC
323static int
324bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
325{
326 struct bnx2 *bp = netdev_priv(dev);
327 struct drv_ctl_io *io = &info->data.io;
328
329 switch (info->cmd) {
330 case DRV_CTL_IO_WR_CMD:
331 bnx2_reg_wr_ind(bp, io->offset, io->data);
332 break;
333 case DRV_CTL_IO_RD_CMD:
334 io->data = bnx2_reg_rd_ind(bp, io->offset);
335 break;
336 case DRV_CTL_CTX_WR_CMD:
337 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
338 break;
339 default:
340 return -EINVAL;
341 }
342 return 0;
343}
344
345static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
346{
347 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
348 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
349 int sb_id;
350
351 if (bp->flags & BNX2_FLAG_USING_MSIX) {
352 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
353 bnapi->cnic_present = 0;
354 sb_id = bp->irq_nvecs;
355 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
356 } else {
357 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_tag = bnapi->last_status_idx;
359 bnapi->cnic_present = 1;
360 sb_id = 0;
361 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
362 }
363
364 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
365 cp->irq_arr[0].status_blk = (void *)
366 ((unsigned long) bnapi->status_blk.msi +
367 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
368 cp->irq_arr[0].status_blk_num = sb_id;
369 cp->num_irq = 1;
370}
371
372static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
373 void *data)
374{
375 struct bnx2 *bp = netdev_priv(dev);
376 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
377
378 if (ops == NULL)
379 return -EINVAL;
380
381 if (cp->drv_state & CNIC_DRV_STATE_REGD)
382 return -EBUSY;
383
384 bp->cnic_data = data;
385 rcu_assign_pointer(bp->cnic_ops, ops);
386
387 cp->num_irq = 0;
388 cp->drv_state = CNIC_DRV_STATE_REGD;
389
390 bnx2_setup_cnic_irq_info(bp);
391
392 return 0;
393}
394
395static int bnx2_unregister_cnic(struct net_device *dev)
396{
397 struct bnx2 *bp = netdev_priv(dev);
398 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
399 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
400
401 cp->drv_state = 0;
402 bnapi->cnic_present = 0;
403 rcu_assign_pointer(bp->cnic_ops, NULL);
404 synchronize_rcu();
405 return 0;
406}
407
408struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
409{
410 struct bnx2 *bp = netdev_priv(dev);
411 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412
413 cp->drv_owner = THIS_MODULE;
414 cp->chip_id = bp->chip_id;
415 cp->pdev = bp->pdev;
416 cp->io_base = bp->regview;
417 cp->drv_ctl = bnx2_drv_ctl;
418 cp->drv_register_cnic = bnx2_register_cnic;
419 cp->drv_unregister_cnic = bnx2_unregister_cnic;
420
421 return cp;
422}
423EXPORT_SYMBOL(bnx2_cnic_probe);
424
425static void
426bnx2_cnic_stop(struct bnx2 *bp)
427{
428 struct cnic_ops *c_ops;
429 struct cnic_ctl_info info;
430
431 rcu_read_lock();
432 c_ops = rcu_dereference(bp->cnic_ops);
433 if (c_ops) {
434 info.cmd = CNIC_CTL_STOP_CMD;
435 c_ops->cnic_ctl(bp->cnic_data, &info);
436 }
437 rcu_read_unlock();
438}
439
440static void
441bnx2_cnic_start(struct bnx2 *bp)
442{
443 struct cnic_ops *c_ops;
444 struct cnic_ctl_info info;
445
446 rcu_read_lock();
447 c_ops = rcu_dereference(bp->cnic_ops);
448 if (c_ops) {
449 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
450 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
451
452 bnapi->cnic_tag = bnapi->last_status_idx;
453 }
454 info.cmd = CNIC_CTL_START_CMD;
455 c_ops->cnic_ctl(bp->cnic_data, &info);
456 }
457 rcu_read_unlock();
458}
459
460#else
461
462static void
463bnx2_cnic_stop(struct bnx2 *bp)
464{
465}
466
467static void
468bnx2_cnic_start(struct bnx2 *bp)
469{
470}
471
472#endif
473
318static int 474static int
319bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) 475bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
320{ 476{
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
488static void 644static void
489bnx2_netif_stop(struct bnx2 *bp) 645bnx2_netif_stop(struct bnx2 *bp)
490{ 646{
647 bnx2_cnic_stop(bp);
491 bnx2_disable_int_sync(bp); 648 bnx2_disable_int_sync(bp);
492 if (netif_running(bp->dev)) { 649 if (netif_running(bp->dev)) {
493 bnx2_napi_disable(bp); 650 bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
504 netif_tx_wake_all_queues(bp->dev); 661 netif_tx_wake_all_queues(bp->dev);
505 bnx2_napi_enable(bp); 662 bnx2_napi_enable(bp);
506 bnx2_enable_int(bp); 663 bnx2_enable_int(bp);
664 bnx2_cnic_start(bp);
507 } 665 }
508 } 666 }
509} 667}
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3164 if (bnx2_has_fast_work(bnapi)) 3322 if (bnx2_has_fast_work(bnapi))
3165 return 1; 3323 return 1;
3166 3324
3325#ifdef BCM_CNIC
3326 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3327 return 1;
3328#endif
3329
3167 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != 3330 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3168 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) 3331 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3169 return 1; 3332 return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
3193 bp->idle_chk_status_idx = bnapi->last_status_idx; 3356 bp->idle_chk_status_idx = bnapi->last_status_idx;
3194} 3357}
3195 3358
3359#ifdef BCM_CNIC
3360static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3361{
3362 struct cnic_ops *c_ops;
3363
3364 if (!bnapi->cnic_present)
3365 return;
3366
3367 rcu_read_lock();
3368 c_ops = rcu_dereference(bp->cnic_ops);
3369 if (c_ops)
3370 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3371 bnapi->status_blk.msi);
3372 rcu_read_unlock();
3373}
3374#endif
3375
3196static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3376static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3197{ 3377{
3198 struct status_block *sblk = bnapi->status_blk.msi; 3378 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3267 3447
3268 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3448 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3269 3449
3450#ifdef BCM_CNIC
3451 bnx2_poll_cnic(bp, bnapi);
3452#endif
3453
3270 /* bnapi->last_status_idx is used below to tell the hw how 3454 /* bnapi->last_status_idx is used below to tell the hw how
3271 * much work has been processed, so we must read it before 3455 * much work has been processed, so we must read it before
3272 * checking for more work. 3456 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
4632 val = REG_RD(bp, BNX2_MQ_CONFIG); 4816 val = REG_RD(bp, BNX2_MQ_CONFIG);
4633 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4817 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4634 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4818 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4635 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) 4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4636 val |= BNX2_MQ_CONFIG_HALT_DIS; 4820 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4821 if (CHIP_REV(bp) == CHIP_REV_Ax)
4822 val |= BNX2_MQ_CONFIG_HALT_DIS;
4823 }
4637 4824
4638 REG_WR(bp, BNX2_MQ_CONFIG, val); 4825 REG_WR(bp, BNX2_MQ_CONFIG, val);
4639 4826
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7471 INIT_WORK(&bp->reset_task, bnx2_reset_task); 7658 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7472 7659
7473 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 7660 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7474 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS); 7661 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7475 dev->mem_end = dev->mem_start + mem_len; 7662 dev->mem_end = dev->mem_start + mem_len;
7476 dev->irq = pdev->irq; 7663 dev->irq = pdev->irq;
7477 7664
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5b570e17c839..a1ff739bc9b5 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) 361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
362 362
363#define BNX2_L2CTX_HOST_BDIDX 0x00000004 363#define BNX2_L2CTX_HOST_BDIDX 0x00000004
364#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
365#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
366 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
364#define BNX2_L2CTX_HOST_BSEQ 0x00000008 367#define BNX2_L2CTX_HOST_BSEQ 0x00000008
365#define BNX2_L2CTX_NX_BSEQ 0x0000000c 368#define BNX2_L2CTX_NX_BSEQ 0x0000000c
366#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 369#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
5900#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) 5903#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
5901 5904
5902#define BNX2_RXP_SCRATCH 0x000e0000 5905#define BNX2_RXP_SCRATCH 0x000e0000
5906#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
5903#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 5907#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
5904#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c 5908#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
5905#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 5909#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
6678 u32 last_status_idx; 6682 u32 last_status_idx;
6679 u32 int_num; 6683 u32 int_num;
6680 6684
6685#ifdef BCM_CNIC
6686 u32 cnic_tag;
6687 int cnic_present;
6688#endif
6689
6681 struct bnx2_rx_ring_info rx_ring; 6690 struct bnx2_rx_ring_info rx_ring;
6682 struct bnx2_tx_ring_info tx_ring; 6691 struct bnx2_tx_ring_info tx_ring;
6683}; 6692};
@@ -6727,6 +6736,11 @@ struct bnx2 {
6727 int tx_ring_size; 6736 int tx_ring_size;
6728 u32 tx_wake_thresh; 6737 u32 tx_wake_thresh;
6729 6738
6739#ifdef BCM_CNIC
6740 struct cnic_ops *cnic_ops;
6741 void *cnic_data;
6742#endif
6743
6730 /* End of fields used in the performance code paths. */ 6744 /* End of fields used in the performance code paths. */
6731 6745
6732 unsigned int current_interval; 6746 unsigned int current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
6885 6899
6886 u32 idle_chk_status_idx; 6900 u32 idle_chk_status_idx;
6887 6901
6902#ifdef BCM_CNIC
6903 struct cnic_eth_dev cnic_eth_dev;
6904#endif
6905
6888 const struct firmware *mips_firmware; 6906 const struct firmware *mips_firmware;
6889 const struct firmware *rv2p_firmware; 6907 const struct firmware *rv2p_firmware;
6890}; 6908};
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 000000000000..8d740376bbd2
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2711 @@
1/* cnic.c: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */
12
13#include <linux/module.h>
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/uio_driver.h>
23#include <linux/in.h>
24#include <linux/dma-mapping.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
29#define BCM_VLAN 1
30#endif
31#include <net/ip.h>
32#include <net/tcp.h>
33#include <net/route.h>
34#include <net/ipv6.h>
35#include <net/ip6_route.h>
36#include <scsi/iscsi_if.h>
37
38#include "cnic_if.h"
39#include "bnx2.h"
40#include "cnic.h"
41#include "cnic_defs.h"
42
43#define DRV_MODULE_NAME "cnic"
44#define PFX DRV_MODULE_NAME ": "
45
46static char version[] __devinitdata =
47 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
48
49MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
50 "Chen (zongxi@broadcom.com");
51MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(CNIC_MODULE_VERSION);
54
55static LIST_HEAD(cnic_dev_list);
56static DEFINE_RWLOCK(cnic_dev_lock);
57static DEFINE_MUTEX(cnic_lock);
58
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60
61static int cnic_service_bnx2(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *);
63
64static struct cnic_ops cnic_bnx2_ops = {
65 .cnic_owner = THIS_MODULE,
66 .cnic_handler = cnic_service_bnx2,
67 .cnic_ctl = cnic_ctl,
68};
69
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *);
74
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
76{
77 struct cnic_dev *dev = uinfo->priv;
78 struct cnic_local *cp = dev->cnic_priv;
79
80 if (!capable(CAP_NET_ADMIN))
81 return -EPERM;
82
83 if (cp->uio_dev != -1)
84 return -EBUSY;
85
86 cp->uio_dev = iminor(inode);
87
88 cnic_shutdown_bnx2_rx_ring(dev);
89
90 cnic_init_bnx2_tx_ring(dev);
91 cnic_init_bnx2_rx_ring(dev);
92
93 return 0;
94}
95
96static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
97{
98 struct cnic_dev *dev = uinfo->priv;
99 struct cnic_local *cp = dev->cnic_priv;
100
101 cp->uio_dev = -1;
102 return 0;
103}
104
105static inline void cnic_hold(struct cnic_dev *dev)
106{
107 atomic_inc(&dev->ref_count);
108}
109
110static inline void cnic_put(struct cnic_dev *dev)
111{
112 atomic_dec(&dev->ref_count);
113}
114
115static inline void csk_hold(struct cnic_sock *csk)
116{
117 atomic_inc(&csk->ref_count);
118}
119
120static inline void csk_put(struct cnic_sock *csk)
121{
122 atomic_dec(&csk->ref_count);
123}
124
125static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
126{
127 struct cnic_dev *cdev;
128
129 read_lock(&cnic_dev_lock);
130 list_for_each_entry(cdev, &cnic_dev_list, list) {
131 if (netdev == cdev->netdev) {
132 cnic_hold(cdev);
133 read_unlock(&cnic_dev_lock);
134 return cdev;
135 }
136 }
137 read_unlock(&cnic_dev_lock);
138 return NULL;
139}
140
141static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
142{
143 struct cnic_local *cp = dev->cnic_priv;
144 struct cnic_eth_dev *ethdev = cp->ethdev;
145 struct drv_ctl_info info;
146 struct drv_ctl_io *io = &info.data.io;
147
148 info.cmd = DRV_CTL_CTX_WR_CMD;
149 io->cid_addr = cid_addr;
150 io->offset = off;
151 io->data = val;
152 ethdev->drv_ctl(dev->netdev, &info);
153}
154
155static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
156{
157 struct cnic_local *cp = dev->cnic_priv;
158 struct cnic_eth_dev *ethdev = cp->ethdev;
159 struct drv_ctl_info info;
160 struct drv_ctl_io *io = &info.data.io;
161
162 info.cmd = DRV_CTL_IO_WR_CMD;
163 io->offset = off;
164 io->data = val;
165 ethdev->drv_ctl(dev->netdev, &info);
166}
167
168static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
169{
170 struct cnic_local *cp = dev->cnic_priv;
171 struct cnic_eth_dev *ethdev = cp->ethdev;
172 struct drv_ctl_info info;
173 struct drv_ctl_io *io = &info.data.io;
174
175 info.cmd = DRV_CTL_IO_RD_CMD;
176 io->offset = off;
177 ethdev->drv_ctl(dev->netdev, &info);
178 return io->data;
179}
180
181static int cnic_in_use(struct cnic_sock *csk)
182{
183 return test_bit(SK_F_INUSE, &csk->flags);
184}
185
186static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
187{
188 struct cnic_local *cp = dev->cnic_priv;
189 struct cnic_eth_dev *ethdev = cp->ethdev;
190 struct drv_ctl_info info;
191
192 info.cmd = DRV_CTL_COMPLETION_CMD;
193 info.data.comp.comp_count = count;
194 ethdev->drv_ctl(dev->netdev, &info);
195}
196
197static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
198 struct cnic_sock *csk)
199{
200 struct iscsi_path path_req;
201 char *buf = NULL;
202 u16 len = 0;
203 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
204 struct cnic_ulp_ops *ulp_ops;
205
206 if (cp->uio_dev == -1)
207 return -ENODEV;
208
209 if (csk) {
210 len = sizeof(path_req);
211 buf = (char *) &path_req;
212 memset(&path_req, 0, len);
213
214 msg_type = ISCSI_KEVENT_PATH_REQ;
215 path_req.handle = (u64) csk->l5_cid;
216 if (test_bit(SK_F_IPV6, &csk->flags)) {
217 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
218 sizeof(struct in6_addr));
219 path_req.ip_addr_len = 16;
220 } else {
221 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
222 sizeof(struct in_addr));
223 path_req.ip_addr_len = 4;
224 }
225 path_req.vlan_id = csk->vlan_id;
226 path_req.pmtu = csk->mtu;
227 }
228
229 rcu_read_lock();
230 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
231 if (ulp_ops)
232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
233 rcu_read_unlock();
234 return 0;
235}
236
237static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
238 char *buf, u16 len)
239{
240 int rc = -EINVAL;
241
242 switch (msg_type) {
243 case ISCSI_UEVENT_PATH_UPDATE: {
244 struct cnic_local *cp;
245 u32 l5_cid;
246 struct cnic_sock *csk;
247 struct iscsi_path *path_resp;
248
249 if (len < sizeof(*path_resp))
250 break;
251
252 path_resp = (struct iscsi_path *) buf;
253 cp = dev->cnic_priv;
254 l5_cid = (u32) path_resp->handle;
255 if (l5_cid >= MAX_CM_SK_TBL_SZ)
256 break;
257
258 csk = &cp->csk_tbl[l5_cid];
259 csk_hold(csk);
260 if (cnic_in_use(csk)) {
261 memcpy(csk->ha, path_resp->mac_addr, 6);
262 if (test_bit(SK_F_IPV6, &csk->flags))
263 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
264 sizeof(struct in6_addr));
265 else
266 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
267 sizeof(struct in_addr));
268 if (is_valid_ether_addr(csk->ha))
269 cnic_cm_set_pg(csk);
270 }
271 csk_put(csk);
272 rc = 0;
273 }
274 }
275
276 return rc;
277}
278
279static int cnic_offld_prep(struct cnic_sock *csk)
280{
281 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
282 return 0;
283
284 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
285 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
286 return 0;
287 }
288
289 return 1;
290}
291
292static int cnic_close_prep(struct cnic_sock *csk)
293{
294 clear_bit(SK_F_CONNECT_START, &csk->flags);
295 smp_mb__after_clear_bit();
296
297 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
298 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
299 msleep(1);
300
301 return 1;
302 }
303 return 0;
304}
305
306static int cnic_abort_prep(struct cnic_sock *csk)
307{
308 clear_bit(SK_F_CONNECT_START, &csk->flags);
309 smp_mb__after_clear_bit();
310
311 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
312 msleep(1);
313
314 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
315 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
316 return 1;
317 }
318
319 return 0;
320}
321
322int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
323{
324 struct cnic_dev *dev;
325
326 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
327 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
328 ulp_type);
329 return -EINVAL;
330 }
331 mutex_lock(&cnic_lock);
332 if (cnic_ulp_tbl[ulp_type]) {
333 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
334 "been registered\n", ulp_type);
335 mutex_unlock(&cnic_lock);
336 return -EBUSY;
337 }
338
339 read_lock(&cnic_dev_lock);
340 list_for_each_entry(dev, &cnic_dev_list, list) {
341 struct cnic_local *cp = dev->cnic_priv;
342
343 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
344 }
345 read_unlock(&cnic_dev_lock);
346
347 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
348 mutex_unlock(&cnic_lock);
349
350 /* Prevent race conditions with netdev_event */
351 rtnl_lock();
352 read_lock(&cnic_dev_lock);
353 list_for_each_entry(dev, &cnic_dev_list, list) {
354 struct cnic_local *cp = dev->cnic_priv;
355
356 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
357 ulp_ops->cnic_init(dev);
358 }
359 read_unlock(&cnic_dev_lock);
360 rtnl_unlock();
361
362 return 0;
363}
364
365int cnic_unregister_driver(int ulp_type)
366{
367 struct cnic_dev *dev;
368
369 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
370 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
371 ulp_type);
372 return -EINVAL;
373 }
374 mutex_lock(&cnic_lock);
375 if (!cnic_ulp_tbl[ulp_type]) {
376 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
377 "been registered\n", ulp_type);
378 goto out_unlock;
379 }
380 read_lock(&cnic_dev_lock);
381 list_for_each_entry(dev, &cnic_dev_list, list) {
382 struct cnic_local *cp = dev->cnic_priv;
383
384 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
385 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
386 "still has devices registered\n", ulp_type);
387 read_unlock(&cnic_dev_lock);
388 goto out_unlock;
389 }
390 }
391 read_unlock(&cnic_dev_lock);
392
393 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
394
395 mutex_unlock(&cnic_lock);
396 synchronize_rcu();
397 return 0;
398
399out_unlock:
400 mutex_unlock(&cnic_lock);
401 return -EINVAL;
402}
403
404static int cnic_start_hw(struct cnic_dev *);
405static void cnic_stop_hw(struct cnic_dev *);
406
407static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
408 void *ulp_ctx)
409{
410 struct cnic_local *cp = dev->cnic_priv;
411 struct cnic_ulp_ops *ulp_ops;
412
413 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
414 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
415 ulp_type);
416 return -EINVAL;
417 }
418 mutex_lock(&cnic_lock);
419 if (cnic_ulp_tbl[ulp_type] == NULL) {
420 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
421 "has not been registered\n", ulp_type);
422 mutex_unlock(&cnic_lock);
423 return -EAGAIN;
424 }
425 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
426 printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
427 "been registered to this device\n", ulp_type);
428 mutex_unlock(&cnic_lock);
429 return -EBUSY;
430 }
431
432 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
433 cp->ulp_handle[ulp_type] = ulp_ctx;
434 ulp_ops = cnic_ulp_tbl[ulp_type];
435 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
436 cnic_hold(dev);
437
438 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
439 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
440 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
441
442 mutex_unlock(&cnic_lock);
443
444 return 0;
445
446}
447EXPORT_SYMBOL(cnic_register_driver);
448
449static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
450{
451 struct cnic_local *cp = dev->cnic_priv;
452
453 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
454 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
455 ulp_type);
456 return -EINVAL;
457 }
458 mutex_lock(&cnic_lock);
459 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
460 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
461 cnic_put(dev);
462 } else {
463 printk(KERN_ERR PFX "cnic_unregister_device: device not "
464 "registered to this ulp type %d\n", ulp_type);
465 mutex_unlock(&cnic_lock);
466 return -EINVAL;
467 }
468 mutex_unlock(&cnic_lock);
469
470 synchronize_rcu();
471
472 return 0;
473}
474EXPORT_SYMBOL(cnic_unregister_driver);
475
476static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
477{
478 id_tbl->start = start_id;
479 id_tbl->max = size;
480 id_tbl->next = 0;
481 spin_lock_init(&id_tbl->lock);
482 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
483 if (!id_tbl->table)
484 return -ENOMEM;
485
486 return 0;
487}
488
/* Release the ID bitmap allocated by cnic_init_id_tbl(). */
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;	/* guard against stale use / double free */
}
494
495static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
496{
497 int ret = -1;
498
499 id -= id_tbl->start;
500 if (id >= id_tbl->max)
501 return ret;
502
503 spin_lock(&id_tbl->lock);
504 if (!test_bit(id, id_tbl->table)) {
505 set_bit(id, id_tbl->table);
506 ret = 0;
507 }
508 spin_unlock(&id_tbl->lock);
509 return ret;
510}
511
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	/* Scan forward from the rotating hint first... */
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			/* ...then wrap and scan [0, next). */
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	/* id == -1 (0xffffffff) fails this range check, so failure
	 * falls through with the bitmap untouched. */
	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		/* NOTE(review): the mask assumes max is a power of two —
		 * confirm for every caller of cnic_init_id_tbl(). */
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
538
539static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
540{
541 if (id == -1)
542 return;
543
544 id -= id_tbl->start;
545 if (id >= id_tbl->max)
546 return;
547
548 clear_bit(id, id_tbl->table);
549}
550
/* Free all DMA memory tracked by @dma: the data pages, the optional
 * hardware page table, and the page pointer/map arrays.  Safe to call
 * on a partially initialized @dma (used on cnic_alloc_dma() error
 * paths) and idempotent because freed fields are NULLed.
 */
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	/* pg_map_arr shares the pg_arr allocation (see cnic_alloc_dma()),
	 * so freeing pg_arr releases both. */
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
574
575static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
576{
577 int i;
578 u32 *page_table = dma->pgtbl;
579
580 for (i = 0; i < dma->num_pages; i++) {
581 /* Each entry needs to be in big endian format. */
582 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
583 page_table++;
584 *page_table = (u32) dma->pg_map_arr[i];
585 page_table++;
586 }
587}
588
/* Allocate @pages DMA-coherent pages for @dma and, if @use_pg_tbl, a
 * hardware page table describing them (filled via cp->setup_pgtbl).
 * pg_arr and pg_map_arr are carved out of one kzalloc so a single
 * kfree releases both.  Returns 0 or -ENOMEM; partial allocations are
 * released through cnic_free_dma() before returning failure.
 */
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	/* One buffer holds both the virtual addresses and DMA handles. */
	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
						      BCM_PAGE_SIZE,
						      &dma->pg_map_arr[i]);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	/* 8 bytes per page-table entry, rounded up to a whole page. */
	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
					  &dma->pgtbl_map);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
628
/* Release every per-device resource: the UIO device, L2 buffer and
 * ring, 5709 context blocks, the KWQ/KCQ/conn/gbl DMA areas, the
 * iSCSI and context tables, and the CID allocator.  Used both on
 * normal teardown and on allocation error paths, so each branch
 * tolerates fields that were never allocated.
 */
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		/* Tell the userspace client the interface is going down,
		 * then give it up to ~1.5 s (15 x 100 ms) to close before
		 * unregistering the UIO device. */
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
				    cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
				    cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	/* Context blocks allocated by cnic_alloc_context(). */
	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
					    cp->ctx_arr[i].ctx,
					    cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
680
/* Allocate host context memory for 5709 chips.  The PG and iSCSI CID
 * ranges are read from chip registers; one cnic_ctx entry is recorded
 * per block of cids_per_blk CIDs, then a DMA-coherent page is
 * allocated per block.  Non-5709 chips need no host context memory
 * and return 0 immediately.  Partial allocations are left for
 * cnic_free_resc() to release.
 */
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			/* Pass 0 covers the PG CID range, pass 1 iSCSI. */
			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			/* Register holds the range as lo:hi in 16-bit halves. */
			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			/* Chip reported more blocks than ctx_arr can hold. */
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
						     &cp->ctx_arr[i].mapping);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
728
/* Allocate all per-device resources for a bnx2 chip: the KWQ/KCQ DMA
 * rings (with hardware page tables), host context memory, the L2 ring
 * and buffer used by the userspace L2 path, and the UIO device that
 * exposes registers, status block and L2 memory to userspace.  On any
 * failure everything allocated so far is unwound via cnic_free_resc().
 */
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
					   &cp->l2_ring_map);
	if (!cp->l2_ring)
		goto error;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
					  &cp->l2_buf_map);
	if (!cp->l2_buf)
		goto error;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		goto error;

	/* Map 0: the device's register BAR (physical). */
	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	/* Map 1: status block(s); MSI-X mode exposes all 9 vectors. */
	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
	else
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	/* Maps 2 and 3: L2 ring and buffer memory. */
	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->name = "bnx2_cnic";
	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		goto error;
	}

	cp->cnic_uinfo = uinfo;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
809
810static inline u32 cnic_kwq_avail(struct cnic_local *cp)
811{
812 return cp->max_kwq_idx -
813 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
814}
815
/* Queue @num_wqes kernel work queue entries onto the bnx2 KWQ under
 * cnic_ulp_lock and ring the doorbell.  Fails with -EAGAIN when the
 * device is down or the ring lacks space — unless this is the first
 * post after KWQ init (CNIC_LCL_FL_KWQ_INIT), when the space check is
 * bypassed.
 */
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		/* Copy each WQE into the next ring slot (page + index). */
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	/* Doorbell: publish the new producer index to the chip. */
	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
850
/* Dispatch @num_cqes completed KCQEs (already gathered into
 * cp->completed_kcq[]) to their owning ULP drivers.  Consecutive
 * KCQEs of the same protocol layer are batched into a single
 * indicate_kcqes() upcall; RAMROD completions additionally credit
 * the KWQ via cnic_kwq_completion().  L2-layer entries are skipped
 * silently, unknown layers are logged and skipped.
 */
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		/* Grow the batch while the layer stays the same. */
		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		/* Map the layer code to a ULP slot. */
		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
			       dev->netdev->name, kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		/* Advance past the batch just handled. */
		num_cqes -= j;
		i += j;
		j = 1;
	}
	return;
}
906
907static u16 cnic_bnx2_next_idx(u16 idx)
908{
909 return idx + 1;
910}
911
912static u16 cnic_bnx2_hw_idx(u16 idx)
913{
914 return idx;
915}
916
/* Gather new KCQEs from the ring between *sw_prod and @hw_prod into
 * cp->completed_kcq[], up to MAX_COMPLETED_KCQE entries.  Entries
 * flagged KCQE_FLAGS_NEXT continue in the following slot, so only
 * complete sequences count: *sw_prod advances to the end of the last
 * complete KCQE and that count is returned.
 */
static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = *sw_prod;
	ri &= MAX_KCQ_IDX;

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		/* Entry without the NEXT flag terminates a sequence. */
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	*sw_prod = last;
	return last_cnt;
}
941
942static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
943{
944 u16 rx_cons = *cp->rx_cons_ptr;
945 u16 tx_cons = *cp->tx_cons_ptr;
946
947 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
948 cp->tx_cons = tx_cons;
949 cp->rx_cons = rx_cons;
950 uio_event_notify(cp->cnic_uinfo);
951 }
952}
953
/* Service routine for the bnx2 main status block.  Drains newly
 * produced KCQEs and dispatches them, re-reading the status block
 * until it stops changing, then acks the consumer index to the chip
 * and checks the L2 rings for UIO notification.  Returns the last
 * status index observed.
 */
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			/* New work arrived while we were servicing. */
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	/* Ack the consumed KCQEs to the chip. */
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);
	return status_idx;
}
995
/* MSI-X service routine — signature matches a tasklet handler and is
 * presumably the body of cp->cnic_irq_task scheduled from cnic_irq();
 * confirm where the tasklet is initialized.  Same KCQ drain loop as
 * cnic_service_bnx2(), plus a final interrupt-ack write that re-arms
 * the vector with the last status index.
 */
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->bnx2_status_blk;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			/* More completions arrived while servicing. */
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	/* Ack consumed KCQEs to the chip. */
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);

	/* Re-arm the interrupt with the last status index we handled. */
	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
1036
/* Interrupt handler: acks the interrupt via the chip-specific hook
 * (if set), prefetches the status block and the next KCQE, and defers
 * the actual servicing to the cnic_irq_task tasklet while the device
 * is up.
 */
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	if (cp->ack_int)
		cp->ack_int(dev);

	/* Warm the cache lines the tasklet will touch first. */
	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	return IRQ_HANDLED;
}
1054
1055static void cnic_ulp_stop(struct cnic_dev *dev)
1056{
1057 struct cnic_local *cp = dev->cnic_priv;
1058 int if_type;
1059
1060 rcu_read_lock();
1061 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1062 struct cnic_ulp_ops *ulp_ops;
1063
1064 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1065 if (!ulp_ops)
1066 continue;
1067
1068 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1069 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1070 }
1071 rcu_read_unlock();
1072}
1073
1074static void cnic_ulp_start(struct cnic_dev *dev)
1075{
1076 struct cnic_local *cp = dev->cnic_priv;
1077 int if_type;
1078
1079 rcu_read_lock();
1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1081 struct cnic_ulp_ops *ulp_ops;
1082
1083 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1084 if (!ulp_ops || !ulp_ops->cnic_start)
1085 continue;
1086
1087 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1088 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1089 }
1090 rcu_read_unlock();
1091}
1092
1093static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1094{
1095 struct cnic_dev *dev = data;
1096
1097 switch (info->cmd) {
1098 case CNIC_CTL_STOP_CMD:
1099 cnic_hold(dev);
1100 mutex_lock(&cnic_lock);
1101
1102 cnic_ulp_stop(dev);
1103 cnic_stop_hw(dev);
1104
1105 mutex_unlock(&cnic_lock);
1106 cnic_put(dev);
1107 break;
1108 case CNIC_CTL_START_CMD:
1109 cnic_hold(dev);
1110 mutex_lock(&cnic_lock);
1111
1112 if (!cnic_start_hw(dev))
1113 cnic_ulp_start(dev);
1114
1115 mutex_unlock(&cnic_lock);
1116 cnic_put(dev);
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121 return 0;
1122}
1123
1124static void cnic_ulp_init(struct cnic_dev *dev)
1125{
1126 int i;
1127 struct cnic_local *cp = dev->cnic_priv;
1128
1129 rcu_read_lock();
1130 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1131 struct cnic_ulp_ops *ulp_ops;
1132
1133 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1134 if (!ulp_ops || !ulp_ops->cnic_init)
1135 continue;
1136
1137 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1138 ulp_ops->cnic_init(dev);
1139
1140 }
1141 rcu_read_unlock();
1142}
1143
1144static void cnic_ulp_exit(struct cnic_dev *dev)
1145{
1146 int i;
1147 struct cnic_local *cp = dev->cnic_priv;
1148
1149 rcu_read_lock();
1150 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1151 struct cnic_ulp_ops *ulp_ops;
1152
1153 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1154 if (!ulp_ops || !ulp_ops->cnic_exit)
1155 continue;
1156
1157 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1158 ulp_ops->cnic_exit(dev);
1159
1160 }
1161 rcu_read_unlock();
1162}
1163
/* Submit an OFFLOAD_PG work request creating a port-group (PG)
 * context for @csk: the L2 header (destination and source MAC,
 * optional VLAN tag) the chip uses for this connection.  csk->l5_cid
 * is passed as host_opaque so the completion can be matched back to
 * this socket in cnic_cm_process_offld_pg().
 */
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	/* Destination MAC address bytes (csk->ha). */
	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	/* Source MAC: this device's address. */
	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_count = DEF_IPID_COUNT;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		/* A VLAN tag adds 4 bytes to the L2 header. */
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
1205
/* Submit an UPDATE_PG work request refreshing the destination MAC of
 * the existing PG context (pg_valids selects only the DA fields).
 * pg_host_opaque echoes csk->l5_cid back in the completion.
 */
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	/* New destination MAC address bytes. */
	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}
1233
1234static int cnic_cm_upload_pg(struct cnic_sock *csk)
1235{
1236 struct cnic_dev *dev = csk->dev;
1237 struct l4_kwq_upload *l4kwqe;
1238 struct kwqe *wqes[1];
1239
1240 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
1241 memset(l4kwqe, 0, sizeof(*l4kwqe));
1242 wqes[0] = (struct kwqe *) l4kwqe;
1243
1244 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
1245 l4kwqe->flags =
1246 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
1247 l4kwqe->cid = csk->pg_cid;
1248
1249 return dev->submit_kwqes(dev, wqes, 1);
1250}
1251
/* Build and submit the CONNECT request sequence for @csk.  CONNECT1
 * (addresses/ports/TCP option flags) and CONNECT3 (keepalive, TOS,
 * TTL, buffers, MSS) are always sent; IPv6 connections insert
 * CONNECT2 with the upper 96 bits of both addresses between them.
 * IP addresses and ports are converted from network byte order, and
 * the MSS is derived from the path MTU minus IP+TCP header overhead.
 */
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	/* CONNECT3: TCP behavior parameters. */
	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		/* CONNECT2 carries the upper 96 bits of the IPv6
		 * addresses; the low 32 bits go in CONNECT1 below. */
		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	/* CONNECT1: identity, low address words, ports, TCP options. */
	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
1335
1336static int cnic_cm_close_req(struct cnic_sock *csk)
1337{
1338 struct cnic_dev *dev = csk->dev;
1339 struct l4_kwq_close_req *l4kwqe;
1340 struct kwqe *wqes[1];
1341
1342 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1343 memset(l4kwqe, 0, sizeof(*l4kwqe));
1344 wqes[0] = (struct kwqe *) l4kwqe;
1345
1346 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1347 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1348 l4kwqe->cid = csk->cid;
1349
1350 return dev->submit_kwqes(dev, wqes, 1);
1351}
1352
1353static int cnic_cm_abort_req(struct cnic_sock *csk)
1354{
1355 struct cnic_dev *dev = csk->dev;
1356 struct l4_kwq_reset_req *l4kwqe;
1357 struct kwqe *wqes[1];
1358
1359 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1360 memset(l4kwqe, 0, sizeof(*l4kwqe));
1361 wqes[0] = (struct kwqe *) l4kwqe;
1362
1363 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1364 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1365 l4kwqe->cid = csk->cid;
1366
1367 return dev->submit_kwqes(dev, wqes, 1);
1368}
1369
1370static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1371 u32 l5_cid, struct cnic_sock **csk, void *context)
1372{
1373 struct cnic_local *cp = dev->cnic_priv;
1374 struct cnic_sock *csk1;
1375
1376 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1377 return -EINVAL;
1378
1379 csk1 = &cp->csk_tbl[l5_cid];
1380 if (atomic_read(&csk1->ref_count))
1381 return -EAGAIN;
1382
1383 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1384 return -EBUSY;
1385
1386 csk1->dev = dev;
1387 csk1->cid = cid;
1388 csk1->l5_cid = l5_cid;
1389 csk1->ulp_type = ulp_type;
1390 csk1->context = context;
1391
1392 csk1->ka_timeout = DEF_KA_TIMEOUT;
1393 csk1->ka_interval = DEF_KA_INTERVAL;
1394 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1395 csk1->tos = DEF_TOS;
1396 csk1->ttl = DEF_TTL;
1397 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1398 csk1->rcv_buf = DEF_RCV_BUF;
1399 csk1->snd_buf = DEF_SND_BUF;
1400 csk1->seed = DEF_SEED;
1401
1402 *csk = csk1;
1403 return 0;
1404}
1405
1406static void cnic_cm_cleanup(struct cnic_sock *csk)
1407{
1408 if (csk->src_port) {
1409 struct cnic_dev *dev = csk->dev;
1410 struct cnic_local *cp = dev->cnic_priv;
1411
1412 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1413 csk->src_port = 0;
1414 }
1415}
1416
/* Close-time teardown: upload (free) the PG context if one was
 * offloaded — the flag is cleared only after the upload request is
 * submitted — then release the reserved local port.
 */
static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}
1425
/* Release @csk.  Clears SK_F_INUSE, then waits for all other
 * reference holders to drop off (ref_count == 1 is our own csk_hold)
 * before releasing the local port and resetting the flags.
 */
static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();	/* flag visible before polling ref_count */
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
1442
1443static inline u16 cnic_get_vlan(struct net_device *dev,
1444 struct net_device **vlan_dev)
1445{
1446 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1447 *vlan_dev = vlan_dev_real_dev(dev);
1448 return vlan_dev_vlan_id(dev);
1449 }
1450 *vlan_dev = dev;
1451 return 0;
1452}
1453
/* Look up an IPv4 route to @dst_addr in the init namespace.  On
 * success *dst points at the route's dst_entry (the caller is
 * responsible for dst_release()); returns the error code from
 * ip_route_output_key().
 */
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->u.dst;
	return err;
}
1469
/* Look up an IPv6 route to @dst_addr; link-local destinations are
 * scoped with sin6_scope_id.  Returns 0 with *dst set, or
 * -ENETUNREACH when IPv6 is not configured or no dst was returned.
 * NOTE(review): only a NULL check is done here — dst->error is left
 * for the caller to inspect (cnic_get_route() does); confirm all
 * callers do so.
 */
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}
1488
/* Map a destination address to the cnic device whose netdev the route
 * exits through (VLAN devices resolved to the real device).  Returns
 * NULL if no route exists or the egress device has no cnic instance.
 * NOTE(review): cnic_put(dev) runs before the pointer is returned —
 * presumably balancing a hold taken inside cnic_from_netdev() — so
 * the returned pointer carries no reference; verify callers' lifetime
 * expectations.
 */
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}
1523
1524static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1525{
1526 struct cnic_dev *dev = csk->dev;
1527 struct cnic_local *cp = dev->cnic_priv;
1528
1529 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1530}
1531
/* Validate @saddr, resolve the route to the remote address, record
 * the destination IP/port and VLAN on @csk, verify the route egresses
 * through this cnic device's netdev, and reserve a local port
 * (honoring the caller's choice when it falls inside
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX)).  The path MTU is
 * cached in csk->mtu.  Returns 0 on success or a negative errno.
 */
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, err, rc = -ENETUNREACH;
	struct dst_entry *dst;
	struct net_device *realdev;
	u32 local_port;

	/* Both local and remote must agree on the address family. */
	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		set_bit(SK_F_IPV6, &csk->flags);
		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;
#else
		return rc;	/* IPv6 not configured */
#endif

	} else {
		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	/* The route must leave through this cnic device. */
	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
	if (realdev != dev->netdev)
		goto err_out;

	/* Try the requested local port first, else allocate one. */
	if (local_port >= CNIC_LOCAL_PORT_MIN &&
	    local_port < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
			local_port = 0;
	} else
		local_port = 0;

	if (!local_port) {
		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (local_port == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
	}
	csk->src_port = local_port;

	csk->mtu = dst_mtu(dst);
	rc = 0;

err_out:
	dst_release(dst);
	return rc;
}
1610
/* Reset per-connection state before starting a new connect attempt. */
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}
1617
1618static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1619{
1620 int err = 0;
1621
1622 if (!cnic_in_use(csk))
1623 return -EINVAL;
1624
1625 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1626 return -EINVAL;
1627
1628 cnic_init_csk_state(csk);
1629
1630 err = cnic_get_route(csk, saddr);
1631 if (err)
1632 goto err_out;
1633
1634 err = cnic_resolve_addr(csk, saddr);
1635 if (!err)
1636 return 0;
1637
1638err_out:
1639 clear_bit(SK_F_CONNECT_START, &csk->flags);
1640 return err;
1641}
1642
/* Abort @csk.  If a connect is in progress (cnic_abort_prep()
 * succeeds) a RESET work request is sent to the chip.  Otherwise the
 * connect never started or already failed: fake a RESET_COMP state
 * and close the connection locally, terminating the offload when the
 * PG stage never completed.
 */
static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
		opcode = csk->state;
	else
		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
	cp->close_conn(csk, opcode);

	return 0;
}
1667
1668static int cnic_cm_close(struct cnic_sock *csk)
1669{
1670 if (!cnic_in_use(csk))
1671 return -EINVAL;
1672
1673 if (cnic_close_prep(csk)) {
1674 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1675 return cnic_cm_close_req(csk);
1676 }
1677 return 0;
1678}
1679
1680static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1681 u8 opcode)
1682{
1683 struct cnic_ulp_ops *ulp_ops;
1684 int ulp_type = csk->ulp_type;
1685
1686 rcu_read_lock();
1687 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1688 if (ulp_ops) {
1689 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1690 ulp_ops->cm_connect_complete(csk);
1691 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1692 ulp_ops->cm_close_complete(csk);
1693 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1694 ulp_ops->cm_remote_abort(csk);
1695 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1696 ulp_ops->cm_abort_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1698 ulp_ops->cm_remote_close(csk);
1699 }
1700 rcu_read_unlock();
1701}
1702
1703static int cnic_cm_set_pg(struct cnic_sock *csk)
1704{
1705 if (cnic_offld_prep(csk)) {
1706 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1707 cnic_cm_update_pg(csk);
1708 else
1709 cnic_cm_offload_pg(csk);
1710 }
1711 return 0;
1712}
1713
/* Handle a port-group (PG) offload/update completion KCQE.  On an
 * offload completion, record the PG context id and send the connect
 * request; an update completion only clears the scheduled flag.
 *
 * NOTE(review): l5_cid comes from the hardware (pg_host_opaque) and is
 * not range-checked here, unlike in cnic_cm_process_kcqe() -- confirm
 * the firmware echoes back only values we supplied.
 */
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	/* Pin the socket while we operate on it. */
	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}
1736
/* Dispatch one L4 KCQE from the completion queue.  PG offload/update
 * completions are routed to cnic_cm_process_offld_pg(); all other
 * events are looked up in the socket table by connection id and
 * handled here.
 */
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	/* Opcodes with bit 7 set carry the connection id in the cid
	 * field instead of conn_id.
	 */
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		/* Make the COMPLETE flag visible before clearing SCHED. */
		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
			csk->state = opcode;
		/* fall through */
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}
1790
1791static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1792{
1793 struct cnic_dev *dev = data;
1794 int i;
1795
1796 for (i = 0; i < num; i++)
1797 cnic_cm_process_kcqe(dev, kcqe[i]);
1798}
1799
/* ULP ops for the internal L4 connection manager; only KCQE
 * indication is needed (registered in cnic_cm_open()).
 */
static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes = cnic_cm_indicate_kcqe,
};
1803
1804static void cnic_cm_free_mem(struct cnic_dev *dev)
1805{
1806 struct cnic_local *cp = dev->cnic_priv;
1807
1808 kfree(cp->csk_tbl);
1809 cp->csk_tbl = NULL;
1810 cnic_free_id_tbl(&cp->csk_port_tbl);
1811}
1812
1813static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1814{
1815 struct cnic_local *cp = dev->cnic_priv;
1816
1817 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1818 GFP_KERNEL);
1819 if (!cp->csk_tbl)
1820 return -ENOMEM;
1821
1822 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1823 CNIC_LOCAL_PORT_MIN)) {
1824 cnic_cm_free_mem(dev);
1825 return -ENOMEM;
1826 }
1827 return 0;
1828}
1829
1830static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1831{
1832 if ((opcode == csk->state) ||
1833 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1834 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1835 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1836 return 1;
1837 }
1838 return 0;
1839}
1840
1841static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1842{
1843 struct cnic_dev *dev = csk->dev;
1844 struct cnic_local *cp = dev->cnic_priv;
1845
1846 clear_bit(SK_F_CONNECT_START, &csk->flags);
1847 if (cnic_ready_to_close(csk, opcode)) {
1848 cnic_close_conn(csk);
1849 cnic_cm_upcall(cp, csk, opcode);
1850 }
1851}
1852
/* No bnx2-specific connection-manager teardown is required; this stub
 * only satisfies the cp->stop_cm() hook.
 */
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}
1856
1857static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1858{
1859 u32 seed;
1860
1861 get_random_bytes(&seed, 4);
1862 cnic_ctx_wr(dev, 45, 0, seed);
1863 return 0;
1864}
1865
/* Bring up the L4 connection manager: allocate the socket/port
 * tables, run the chip-specific CM init, publish the cm_* entry
 * points on the cnic_dev, and register the internal L4 ULP ops so
 * KCQEs reach cnic_cm_indicate_kcqe().  Returns 0 or a negative
 * errno with everything unwound.
 */
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	/* Publish the ops last so readers see a fully set-up device. */
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}
1895
1896static int cnic_cm_shutdown(struct cnic_dev *dev)
1897{
1898 struct cnic_local *cp = dev->cnic_priv;
1899 int i;
1900
1901 cp->stop_cm(dev);
1902
1903 if (!cp->csk_tbl)
1904 return 0;
1905
1906 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1907 struct cnic_sock *csk = &cp->csk_tbl[i];
1908
1909 clear_bit(SK_F_INUSE, &csk->flags);
1910 cnic_cm_cleanup(csk);
1911 }
1912 cnic_cm_free_mem(dev);
1913
1914 return 0;
1915}
1916
1917static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1918{
1919 struct cnic_local *cp = dev->cnic_priv;
1920 u32 cid_addr;
1921 int i;
1922
1923 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1924 return;
1925
1926 cid_addr = GET_CID_ADDR(cid);
1927
1928 for (i = 0; i < CTX_SIZE; i += 4)
1929 cnic_ctx_wr(dev, cid_addr, i, 0);
1930}
1931
/* Program (or invalidate, when @valid is 0) the 5709's host context
 * page table.  Each context block's DMA address is written to the
 * page-table registers and each write is polled for completion.
 * Returns 0 on success or -EBUSY if the chip never acks a write.
 */
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	/* Only the 5709 uses host-memory context paging. */
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll (up to ~50us) for the WRITE_REQ bit to clear. */
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1968
1969static void cnic_free_irq(struct cnic_dev *dev)
1970{
1971 struct cnic_local *cp = dev->cnic_priv;
1972 struct cnic_eth_dev *ethdev = cp->ethdev;
1973
1974 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1975 cp->disable_int_sync(dev);
1976 tasklet_disable(&cp->cnic_irq_task);
1977 free_irq(ethdev->irq_arr[0].vector, dev);
1978 }
1979}
1980
/* Set up interrupt handling for the bnx2.  In MSI-X mode, program the
 * status-block coalescing parameters, hook the service tasklet to the
 * vector, and request the IRQ.  In both modes, force status-block
 * updates until the KCQ completion producer index reads zero.
 * Returns 0 on success, the request_irq() error, or -EBUSY if the
 * index never resets.
 */
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->bnx2_status_blk = cp->status_blk;
		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err) {
			tasklet_disable(&cp->cnic_irq_task);
			return err;
		}
		/* Force status-block updates (up to 10 tries) until the
		 * completion producer index resets to 0.
		 */
		while (cp->bnx2_status_blk->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->bnx2_status_blk->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		/* Same reset wait, but via the shared HC command register
		 * since there is no dedicated vector.
		 */
		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
	       dev->netdev->name);
	return -EBUSY;
}
2044
2045static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2046{
2047 struct cnic_local *cp = dev->cnic_priv;
2048 struct cnic_eth_dev *ethdev = cp->ethdev;
2049
2050 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2051 return;
2052
2053 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2054 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2055}
2056
/* Mask the device's MSI-X interrupt and wait for any handler already
 * running on another CPU to finish.  No-op when MSI-X is not in use.
 */
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back to flush the posted write before synchronizing. */
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
2070
/* Initialize the L2 TX ring context and buffer descriptors.  Uses cid
 * 20 (or a TSS cid derived from the status block number in MSI-X
 * mode) and points every TX BD at the shared l2_buf DMA buffer.
 */
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cnic_init_context(dev, tx_cid);
	cnic_init_context(dev, tx_cid + 1);
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		tx_cid = TX_TSS_CID + sb_id - 1;
		cnic_init_context(dev, tx_cid);
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		/* 5709 uses the _XI context offsets and an extra context
		 * area that must be cleared first.
		 */
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) cp->l2_ring;

	/* Every TX BD targets the same host buffer. */
	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	/* The final descriptor (index MAX_TX_DESC_CNT) gets the ring's
	 * own DMA address -- apparently the chain-back pointer; confirm
	 * against the bnx2 ring layout.
	 */
	val = (u64) cp->l2_ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
2136
2137static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2138{
2139 struct cnic_local *cp = dev->cnic_priv;
2140 struct cnic_eth_dev *ethdev = cp->ethdev;
2141 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2142 int i;
2143 struct rx_bd *rxbd;
2144 struct status_block *s_blk = cp->status_blk;
2145
2146 sb_id = cp->status_blk_num;
2147 cnic_init_context(dev, 2);
2148 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2149 coal_reg = BNX2_HC_COMMAND;
2150 coal_val = CNIC_RD(dev, coal_reg);
2151 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2152 struct status_block_msix *sblk = cp->status_blk;
2153
2154 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2155 coal_reg = BNX2_HC_COALESCE_NOW;
2156 coal_val = 1 << (11 + sb_id);
2157 }
2158 i = 0;
2159 while (!(*cp->rx_cons_ptr != 0) && i < 10) {
2160 CNIC_WR(dev, coal_reg, coal_val);
2161 udelay(10);
2162 i++;
2163 barrier();
2164 }
2165 cp->rx_cons = *cp->rx_cons_ptr;
2166
2167 cid_addr = GET_CID_ADDR(2);
2168 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2169 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2170 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2171
2172 if (sb_id == 0)
2173 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2174 else
2175 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2177
2178 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2179 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2180 dma_addr_t buf_map;
2181 int n = (i % cp->l2_rx_ring_size) + 1;
2182
2183 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2184 rxbd->rx_bd_len = cp->l2_single_buf_size;
2185 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2186 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2187 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2188 }
2189 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2190 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2191 rxbd->rx_bd_haddr_hi = val;
2192
2193 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2194 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2195 rxbd->rx_bd_haddr_lo = val;
2196
2197 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2198 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2199}
2200
2201static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2202{
2203 struct kwqe *wqes[1], l2kwqe;
2204
2205 memset(&l2kwqe, 0, sizeof(l2kwqe));
2206 wqes[0] = &l2kwqe;
2207 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2208 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2209 KWQE_OPCODE_SHIFT) | 2;
2210 dev->submit_kwqes(dev, wqes, 1);
2211}
2212
/* Read the iSCSI MAC address from shared memory into dev->mac_addr,
 * program the EMAC perfect-match registers with it, and configure the
 * RPM sort register for this MAC (plus broadcast).
 */
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	/* Per-PCI-function slot in the shared-memory header. */
	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	/* Clear, program, then enable the sort register in sequence. */
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
2246
/* bnx2-specific hardware bring-up: program the MAC, configure the MQ
 * and host-coalescing blocks, set up the 5709 context page table,
 * initialize the kernel work (KWQ) and completion (KCQ) queue
 * contexts, ring the CP/COM doorbells, set up the L2 rings, and
 * finally hook up the IRQ.  Returns 0 or a negative errno.
 */
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	/* Program the kernel-bypass block size from the page size. */
	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	/* Older chips report the KWQ consumer in a different status
	 * block field.
	 */
	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver post an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells. These two processors polls the
	 * doorbell for a non zero value before running. This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
		       dev->netdev->name);
		/* Undo the doorbells so the processors stop. */
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
2372
/* Generic device bring-up: register with the underlying ethernet
 * driver, copy over hardware parameters, allocate DMA resources,
 * start the chip-specific hardware and the connection manager, then
 * enable interrupts.  Returns 0, -EALREADY if already up, or a
 * negative errno with the completed steps unwound.
 */
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err) {
		printk(KERN_ERR PFX "%s: register_cnic failed\n",
		       dev->netdev->name);
		goto err2;
	}

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	/* Hold the PCI device while we are up; dropped on any failure
	 * below and in cnic_stop_hw().
	 */
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: allocate resource failure\n",
		       dev->netdev->name);
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	ethdev->drv_unregister_cnic(dev->netdev);
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
err2:
	return err;
}
2424
/* bnx2-specific teardown, reverse of cnic_start_bnx2_hw(): quiesce
 * the interrupt, clear the CP/COM doorbells so the processors stop,
 * reset the kernel queue contexts, invalidate the 5709 context page
 * table, free the IRQ, unregister from bnx2, and release DMA
 * resources.
 */
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	/* valid = 0: invalidate the 5709 host page table entries. */
	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	ethdev->drv_unregister_cnic(dev->netdev);

	cnic_free_resc(dev);
}
2445
/* Generic device shutdown, reverse of cnic_start_hw().  Unpublishes
 * the L4 ULP ops and waits out RCU readers before tearing down the
 * connection manager, then runs the chip-specific stop and drops the
 * PCI reference taken at start.
 */
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
2459
2460static void cnic_free_dev(struct cnic_dev *dev)
2461{
2462 int i = 0;
2463
2464 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2465 msleep(100);
2466 i++;
2467 }
2468 if (atomic_read(&dev->ref_count) != 0)
2469 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2470 " to zero.\n", dev->netdev->name);
2471
2472 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2473 dev_put(dev->netdev);
2474 kfree(dev);
2475}
2476
2477static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2478 struct pci_dev *pdev)
2479{
2480 struct cnic_dev *cdev;
2481 struct cnic_local *cp;
2482 int alloc_size;
2483
2484 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2485
2486 cdev = kzalloc(alloc_size , GFP_KERNEL);
2487 if (cdev == NULL) {
2488 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2489 dev->name);
2490 return NULL;
2491 }
2492
2493 cdev->netdev = dev;
2494 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2495 cdev->register_device = cnic_register_device;
2496 cdev->unregister_device = cnic_unregister_device;
2497 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2498
2499 cp = cdev->cnic_priv;
2500 cp->dev = cdev;
2501 cp->uio_dev = -1;
2502 cp->l2_single_buf_size = 0x400;
2503 cp->l2_rx_ring_size = 3;
2504
2505 spin_lock_init(&cp->cnic_ulp_lock);
2506
2507 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2508
2509 return cdev;
2510}
2511
/* Probe @dev as a bnx2 device and build a cnic_dev around it.  The
 * bnx2 driver is looked up with __symbol_get() so there is no hard
 * module dependency.  Returns the new cnic_dev or NULL.
 */
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(void *) = NULL;

	probe = __symbol_get("bnx2_cnic_probe");
	if (probe) {
		ethdev = (*probe)(dev);
		/* Drop the symbol ref; only the one call was needed. */
		symbol_put_addr(probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	/* 5709/5709S parts with revision < 0x10 are rejected --
	 * presumably unsupported early steppings; confirm against bnx2
	 * errata.
	 */
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	/* NOTE(review): the pci ref is dropped here although pdev is
	 * stored in cdev->pcidev below -- confirm bnx2 keeps its own
	 * reference for the device's lifetime (cnic_start_hw() takes a
	 * fresh ref when the device comes up).
	 */
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	/* Wire up the bnx2-specific operation table. */
	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
2576
2577static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2578{
2579 struct ethtool_drvinfo drvinfo;
2580 struct cnic_dev *cdev = NULL;
2581
2582 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2583 memset(&drvinfo, 0, sizeof(drvinfo));
2584 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2585
2586 if (!strcmp(drvinfo.driver, "bnx2"))
2587 cdev = init_bnx2_cnic(dev);
2588 if (cdev) {
2589 write_lock(&cnic_dev_lock);
2590 list_add(&cdev->list, &cnic_dev_list);
2591 write_unlock(&cnic_dev_lock);
2592 }
2593 }
2594 return cdev;
2595}
2596
2597/**
2598 * netdev event handler
2599 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);
		else if (event == NETDEV_UP) {
			mutex_lock(&cnic_lock);
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
			mutex_unlock(&cnic_lock);
		}

		/* Forward the raw netdev event to every registered ULP. */
		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			mutex_lock(&cnic_lock);
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			mutex_unlock(&cnic_lock);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			/* Drop our hold, then free; cnic_free_dev() waits
			 * for remaining references to drain.
			 */
			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}
2666
/* Registered in cnic_init(); dispatches to cnic_netdev_event(). */
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};
2670
2671static void cnic_release(void)
2672{
2673 struct cnic_dev *dev;
2674
2675 while (!list_empty(&cnic_dev_list)) {
2676 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2677 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2678 cnic_ulp_stop(dev);
2679 cnic_stop_hw(dev);
2680 }
2681
2682 cnic_ulp_exit(dev);
2683 list_del_init(&dev->list);
2684 cnic_free_dev(dev);
2685 }
2686}
2687
2688static int __init cnic_init(void)
2689{
2690 int rc = 0;
2691
2692 printk(KERN_INFO "%s", version);
2693
2694 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2695 if (rc) {
2696 cnic_release();
2697 return rc;
2698 }
2699
2700 return 0;
2701}
2702
2703static void __exit cnic_exit(void)
2704{
2705 unregister_netdevice_notifier(&cnic_netdev_notifier);
2706 cnic_release();
2707 return;
2708}
2709
2710module_init(cnic_init);
2711module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 000000000000..5192d4a9df5a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
1/* cnic.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_H
13#define CNIC_H
14
15#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16
17
18#define KWQ_CID 24
19#define KCQ_CID 25
20
21/*
22 * krnlq_context definition
23 */
24#define L5_KRNLQ_FLAGS 0x00000000
25#define L5_KRNLQ_SIZE 0x00000000
26#define L5_KRNLQ_TYPE 0x00000000
27#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
28#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
29#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
30#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
31#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
32#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
33#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
34#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
35#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
36#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
37#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
38#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
39#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
40#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
41#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
42#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
43#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
44#define KRNLQ_TYPE_TYPE (0xf<<28)
45#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
46#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
47
48#define L5_KRNLQ_HOST_QIDX 0x00000004
49#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
50#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
51#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
52#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
53#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
54#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
55#define L5_KRNLQ_NX_PG_QIDX 0x00000018
56#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
57#define L5_KRNLQ_QIDX_INCR 0x0000001c
58#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
59#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
60
61#define BNX2_PG_CTX_MAP 0x1a0034
62#define BNX2_ISCSI_CTX_MAP 0x1a0074
63
64struct cnic_redirect_entry {
65 struct dst_entry *old_dst;
66 struct dst_entry *new_dst;
67};
68
69#define MAX_COMPLETED_KCQE 64
70
71#define MAX_CNIC_L5_CONTEXT 256
72
73#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
74
75#define MAX_ISCSI_TBL_SZ 256
76
77#define CNIC_LOCAL_PORT_MIN 60000
78#define CNIC_LOCAL_PORT_MAX 61000
79#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
80
81#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
82#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
83#define MAX_KWQE_CNT (KWQE_CNT - 1)
84#define MAX_KCQE_CNT (KCQE_CNT - 1)
85
86#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
87#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
88
89#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
90#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
91
92#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
93#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
94
95#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
96 (MAX_KCQE_CNT - 1)) ? \
97 (x) + 2 : (x) + 1
98
99#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
100#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103
104#define DEF_IPID_COUNT 0xc001
105
106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000
108#define DEF_KA_MAX_PROBE_COUNT 3
109#define DEF_TOS 0
110#define DEF_TTL 0xfe
111#define DEF_SND_SEQ_SCALE 0
112#define DEF_RCV_BUF 0xffff
113#define DEF_SND_BUF 0xffff
114#define DEF_SEED 0
115#define DEF_MAX_RT_TIME 500
116#define DEF_MAX_DA_COUNT 2
117#define DEF_SWS_TIMER 1000
118#define DEF_MAX_CWND 0xffff
119
120struct cnic_ctx {
121 u32 cid;
122 void *ctx;
123 dma_addr_t mapping;
124};
125
126#define BNX2_MAX_CID 0x2000
127
128struct cnic_dma {
129 int num_pages;
130 void **pg_arr;
131 dma_addr_t *pg_map_arr;
132 int pgtbl_size;
133 u32 *pgtbl;
134 dma_addr_t pgtbl_map;
135};
136
137struct cnic_id_tbl {
138 spinlock_t lock;
139 u32 start;
140 u32 max;
141 u32 next;
142 unsigned long *table;
143};
144
145#define CNIC_KWQ16_DATA_SIZE 128
146
147struct kwqe_16_data {
148 u8 data[CNIC_KWQ16_DATA_SIZE];
149};
150
151struct cnic_iscsi {
152 struct cnic_dma task_array_info;
153 struct cnic_dma r2tq_info;
154 struct cnic_dma hq_info;
155};
156
157struct cnic_context {
158 u32 cid;
159 struct kwqe_16_data *kwqe_data;
160 dma_addr_t kwqe_data_mapping;
161 wait_queue_head_t waitq;
162 int wait_cond;
163 unsigned long timestamp;
164 u32 ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001
166 u8 ulp_proto_id;
167 union {
168 struct cnic_iscsi *iscsi;
169 } proto;
170};
171
172struct cnic_local {
173
174 spinlock_t cnic_ulp_lock;
175 void *ulp_handle[MAX_CNIC_ULP_TYPE];
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0
178#define ULP_F_START 1
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180
181 /* protected by ulp_lock */
182 u32 cnic_local_flags;
183#define CNIC_LCL_FL_KWQ_INIT 0x00000001
184
185 struct cnic_dev *dev;
186
187 struct cnic_eth_dev *ethdev;
188
189 void *l2_ring;
190 dma_addr_t l2_ring_map;
191 int l2_ring_size;
192 int l2_rx_ring_size;
193
194 void *l2_buf;
195 dma_addr_t l2_buf_map;
196 int l2_buf_size;
197 int l2_single_buf_size;
198
199 u16 *rx_cons_ptr;
200 u16 *tx_cons_ptr;
201 u16 rx_cons;
202 u16 tx_cons;
203
204 u32 kwq_cid_addr;
205 u32 kcq_cid_addr;
206
207 struct cnic_dma kwq_info;
208 struct kwqe **kwq;
209
210 struct cnic_dma kwq_16_data_info;
211
212 u16 max_kwq_idx;
213
214 u16 kwq_prod_idx;
215 u32 kwq_io_addr;
216
217 u16 *kwq_con_idx_ptr;
218 u16 kwq_con_idx;
219
220 struct cnic_dma kcq_info;
221 struct kcqe **kcq;
222
223 u16 kcq_prod_idx;
224 u32 kcq_io_addr;
225
226 void *status_blk;
227 struct status_block_msix *bnx2_status_blk;
228 struct host_status_block *bnx2x_status_blk;
229
230 u32 status_blk_num;
231 u32 int_num;
232 u32 last_status_idx;
233 struct tasklet_struct cnic_irq_task;
234
235 struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
236
237 struct cnic_sock *csk_tbl;
238 struct cnic_id_tbl csk_port_tbl;
239
240 struct cnic_dma conn_buf_info;
241 struct cnic_dma gbl_buf_info;
242
243 struct cnic_iscsi *iscsi_tbl;
244 struct cnic_context *ctx_tbl;
245 struct cnic_id_tbl cid_tbl;
246 int max_iscsi_conn;
247 atomic_t iscsi_conn;
248
249 /* per connection parameters */
250 int num_iscsi_tasks;
251 int num_ccells;
252 int task_array_size;
253 int r2tq_size;
254 int hq_size;
255 int num_cqs;
256
257 struct cnic_ctx *ctx_arr;
258 int ctx_blks;
259 int ctx_blk_size;
260 int cids_per_blk;
261
262 u32 chip_id;
263 int func;
264 u32 shmem_base;
265
266 u32 uio_dev;
267 struct uio_info *cnic_uinfo;
268
269 struct cnic_ops *cnic_ops;
270 int (*start_hw)(struct cnic_dev *);
271 void (*stop_hw)(struct cnic_dev *);
272 void (*setup_pgtbl)(struct cnic_dev *,
273 struct cnic_dma *);
274 int (*alloc_resc)(struct cnic_dev *);
275 void (*free_resc)(struct cnic_dev *);
276 int (*start_cm)(struct cnic_dev *);
277 void (*stop_cm)(struct cnic_dev *);
278 void (*enable_int)(struct cnic_dev *);
279 void (*disable_int_sync)(struct cnic_dev *);
280 void (*ack_int)(struct cnic_dev *);
281 void (*close_conn)(struct cnic_sock *, u32 opcode);
282 u16 (*next_idx)(u16);
283 u16 (*hw_idx)(u16);
284};
285
286struct bnx2x_bd_chain_next {
287 u32 addr_lo;
288 u32 addr_hi;
289 u8 reserved[8];
290};
291
292#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
293#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
294
295#define CDU_REGION_NUMBER_XCM_AG 2
296#define CDU_REGION_NUMBER_UCM_AG 4
297
298#endif
299
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 000000000000..cee80f694457
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
1
2/* cnic.c: Broadcom CNIC core network driver.
3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#ifndef CNIC_DEFS_H
13#define CNIC_DEFS_H
14
15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
20#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
21#define L4_KWQE_OPCODE_VALUE_RESET (53)
22#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
23#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
24#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
25
26#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
27#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
28#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
29
30#define L5CM_RAMROD_CMD_ID_BASE (0x80)
31#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
32#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
33#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
34#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
35#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
36
37/* KCQ (kernel completion queue) response op codes */
38#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
39#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
40#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
41#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
42#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
43#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
44#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
45
46#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
47#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49
50/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53
54#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2)
56
57/*
58 * L4 KCQ CQE
59 */
60struct l4_kcq {
61 u32 cid;
62 u32 pg_cid;
63 u32 conn_id;
64 u32 pg_host_opaque;
65#if defined(__BIG_ENDIAN)
66 u16 status;
67 u16 reserved1;
68#elif defined(__LITTLE_ENDIAN)
69 u16 reserved1;
70 u16 status;
71#endif
72 u32 reserved2[2];
73#if defined(__BIG_ENDIAN)
74 u8 flags;
75#define L4_KCQ_RESERVED3 (0x7<<0)
76#define L4_KCQ_RESERVED3_SHIFT 0
77#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
78#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
79#define L4_KCQ_LAYER_CODE (0x7<<4)
80#define L4_KCQ_LAYER_CODE_SHIFT 4
81#define L4_KCQ_RESERVED4 (0x1<<7)
82#define L4_KCQ_RESERVED4_SHIFT 7
83 u8 op_code;
84 u16 qe_self_seq;
85#elif defined(__LITTLE_ENDIAN)
86 u16 qe_self_seq;
87 u8 op_code;
88 u8 flags;
89#define L4_KCQ_RESERVED3 (0xF<<0)
90#define L4_KCQ_RESERVED3_SHIFT 0
91#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
92#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
93#define L4_KCQ_LAYER_CODE (0x7<<4)
94#define L4_KCQ_LAYER_CODE_SHIFT 4
95#define L4_KCQ_RESERVED4 (0x1<<7)
96#define L4_KCQ_RESERVED4_SHIFT 7
97#endif
98};
99
100
101/*
102 * L4 KCQ CQE PG upload
103 */
104struct l4_kcq_upload_pg {
105 u32 pg_cid;
106#if defined(__BIG_ENDIAN)
107 u16 pg_status;
108 u16 pg_ipid_count;
109#elif defined(__LITTLE_ENDIAN)
110 u16 pg_ipid_count;
111 u16 pg_status;
112#endif
113 u32 reserved1[5];
114#if defined(__BIG_ENDIAN)
115 u8 flags;
116#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
117#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
118#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
119#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
120#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
121#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
122 u8 op_code;
123 u16 qe_self_seq;
124#elif defined(__LITTLE_ENDIAN)
125 u16 qe_self_seq;
126 u8 op_code;
127 u8 flags;
128#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
129#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
130#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
131#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
132#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
133#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
134#endif
135};
136
137
138/*
139 * Gracefully close the connection request
140 */
141struct l4_kwq_close_req {
142#if defined(__BIG_ENDIAN)
143 u8 flags;
144#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
145#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
146#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
147#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
148#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
149#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
150 u8 op_code;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 op_code;
155 u8 flags;
156#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
157#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
158#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
159#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
160#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
161#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
162#endif
163 u32 cid;
164 u32 reserved2[6];
165};
166
167
168/*
169 * The first request to be passed in order to establish connection in option2
170 */
171struct l4_kwq_connect_req1 {
172#if defined(__BIG_ENDIAN)
173 u8 flags;
174#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
175#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
176#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
177#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
178#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
179#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
180 u8 op_code;
181 u8 reserved0;
182 u8 conn_flags;
183#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
184#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
185#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
186#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
187#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
188#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
189#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
190#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
191#elif defined(__LITTLE_ENDIAN)
192 u8 conn_flags;
193#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
194#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
195#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
196#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
197#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
198#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
199#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
200#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
201 u8 reserved0;
202 u8 op_code;
203 u8 flags;
204#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
205#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
206#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
207#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
208#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
209#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
210#endif
211 u32 cid;
212 u32 pg_cid;
213 u32 src_ip;
214 u32 dst_ip;
215#if defined(__BIG_ENDIAN)
216 u16 dst_port;
217 u16 src_port;
218#elif defined(__LITTLE_ENDIAN)
219 u16 src_port;
220 u16 dst_port;
221#endif
222#if defined(__BIG_ENDIAN)
223 u8 rsrv1[3];
224 u8 tcp_flags;
225#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
226#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
227#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
228#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
229#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
230#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
231#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
232#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
233#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
234#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
235#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
236#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
237#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
238#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
239#elif defined(__LITTLE_ENDIAN)
240 u8 tcp_flags;
241#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
242#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
243#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
244#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
245#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
246#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
247#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
248#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
249#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
250#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
251#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
252#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
253#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
254#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
255 u8 rsrv1[3];
256#endif
257 u32 rsrv2;
258};
259
260
261/*
262 * The second ( optional )request to be passed in order to establish
263 * connection in option2 - for IPv6 only
264 */
265struct l4_kwq_connect_req2 {
266#if defined(__BIG_ENDIAN)
267 u8 flags;
268#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
269#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
270#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
271#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
272#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
273#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
274 u8 op_code;
275 u8 reserved0;
276 u8 rsrv;
277#elif defined(__LITTLE_ENDIAN)
278 u8 rsrv;
279 u8 reserved0;
280 u8 op_code;
281 u8 flags;
282#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
283#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
284#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
285#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
286#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
287#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
288#endif
289 u32 reserved2;
290 u32 src_ip_v6_2;
291 u32 src_ip_v6_3;
292 u32 src_ip_v6_4;
293 u32 dst_ip_v6_2;
294 u32 dst_ip_v6_3;
295 u32 dst_ip_v6_4;
296};
297
298
299/*
300 * The third ( and last )request to be passed in order to establish
301 * connection in option2
302 */
303struct l4_kwq_connect_req3 {
304#if defined(__BIG_ENDIAN)
305 u8 flags;
306#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
307#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
308#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
309#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
310#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
311#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
312 u8 op_code;
313 u16 reserved0;
314#elif defined(__LITTLE_ENDIAN)
315 u16 reserved0;
316 u8 op_code;
317 u8 flags;
318#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
319#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
320#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
321#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
322#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
323#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
324#endif
325 u32 ka_timeout;
326 u32 ka_interval ;
327#if defined(__BIG_ENDIAN)
328 u8 snd_seq_scale;
329 u8 ttl;
330 u8 tos;
331 u8 ka_max_probe_count;
332#elif defined(__LITTLE_ENDIAN)
333 u8 ka_max_probe_count;
334 u8 tos;
335 u8 ttl;
336 u8 snd_seq_scale;
337#endif
338#if defined(__BIG_ENDIAN)
339 u16 pmtu;
340 u16 mss;
341#elif defined(__LITTLE_ENDIAN)
342 u16 mss;
343 u16 pmtu;
344#endif
345 u32 rcv_buf;
346 u32 snd_buf;
347 u32 seed;
348};
349
350
351/*
352 * a KWQE request to offload a PG connection
353 */
354struct l4_kwq_offload_pg {
355#if defined(__BIG_ENDIAN)
356 u8 flags;
357#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
358#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
359#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
360#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
361#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
362#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
363 u8 op_code;
364 u16 reserved0;
365#elif defined(__LITTLE_ENDIAN)
366 u16 reserved0;
367 u8 op_code;
368 u8 flags;
369#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
370#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
371#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
372#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
373#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
374#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
375#endif
376#if defined(__BIG_ENDIAN)
377 u8 l2hdr_nbytes;
378 u8 pg_flags;
379#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
380#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
381#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
382#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
383#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
384#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
385 u8 da0;
386 u8 da1;
387#elif defined(__LITTLE_ENDIAN)
388 u8 da1;
389 u8 da0;
390 u8 pg_flags;
391#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
392#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
393#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
394#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
395#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
396#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
397 u8 l2hdr_nbytes;
398#endif
399#if defined(__BIG_ENDIAN)
400 u8 da2;
401 u8 da3;
402 u8 da4;
403 u8 da5;
404#elif defined(__LITTLE_ENDIAN)
405 u8 da5;
406 u8 da4;
407 u8 da3;
408 u8 da2;
409#endif
410#if defined(__BIG_ENDIAN)
411 u8 sa0;
412 u8 sa1;
413 u8 sa2;
414 u8 sa3;
415#elif defined(__LITTLE_ENDIAN)
416 u8 sa3;
417 u8 sa2;
418 u8 sa1;
419 u8 sa0;
420#endif
421#if defined(__BIG_ENDIAN)
422 u8 sa4;
423 u8 sa5;
424 u16 etype;
425#elif defined(__LITTLE_ENDIAN)
426 u16 etype;
427 u8 sa5;
428 u8 sa4;
429#endif
430#if defined(__BIG_ENDIAN)
431 u16 vlan_tag;
432 u16 ipid_start;
433#elif defined(__LITTLE_ENDIAN)
434 u16 ipid_start;
435 u16 vlan_tag;
436#endif
437#if defined(__BIG_ENDIAN)
438 u16 ipid_count;
439 u16 reserved3;
440#elif defined(__LITTLE_ENDIAN)
441 u16 reserved3;
442 u16 ipid_count;
443#endif
444 u32 host_opaque;
445};
446
447
448/*
449 * Abortively close the connection request
450 */
451struct l4_kwq_reset_req {
452#if defined(__BIG_ENDIAN)
453 u8 flags;
454#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
455#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
456#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
457#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
458#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
459#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
460 u8 op_code;
461 u16 reserved0;
462#elif defined(__LITTLE_ENDIAN)
463 u16 reserved0;
464 u8 op_code;
465 u8 flags;
466#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
467#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
468#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
469#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
470#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
471#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
472#endif
473 u32 cid;
474 u32 reserved2[6];
475};
476
477
478/*
479 * a KWQE request to update a PG connection
480 */
481struct l4_kwq_update_pg {
482#if defined(__BIG_ENDIAN)
483 u8 flags;
484#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
485#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
486#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
487#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
488#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
489#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
490 u8 opcode;
491 u16 oper16;
492#elif defined(__LITTLE_ENDIAN)
493 u16 oper16;
494 u8 opcode;
495 u8 flags;
496#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
497#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
498#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
499#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
500#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
501#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
502#endif
503 u32 pg_cid;
504 u32 pg_host_opaque;
505#if defined(__BIG_ENDIAN)
506 u8 pg_valids;
507#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
508#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
509#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
510#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
511#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
512#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
513 u8 pg_unused_a;
514 u16 pg_ipid_count;
515#elif defined(__LITTLE_ENDIAN)
516 u16 pg_ipid_count;
517 u8 pg_unused_a;
518 u8 pg_valids;
519#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
520#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
521#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
522#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
523#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
524#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
525#endif
526#if defined(__BIG_ENDIAN)
527 u16 reserverd3;
528 u8 da0;
529 u8 da1;
530#elif defined(__LITTLE_ENDIAN)
531 u8 da1;
532 u8 da0;
533 u16 reserverd3;
534#endif
535#if defined(__BIG_ENDIAN)
536 u8 da2;
537 u8 da3;
538 u8 da4;
539 u8 da5;
540#elif defined(__LITTLE_ENDIAN)
541 u8 da5;
542 u8 da4;
543 u8 da3;
544 u8 da2;
545#endif
546 u32 reserved4;
547 u32 reserved5;
548};
549
550
551/*
552 * a KWQE request to upload a PG or L4 context
553 */
554struct l4_kwq_upload {
555#if defined(__BIG_ENDIAN)
556 u8 flags;
557#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
558#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
559#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
560#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
561#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
562#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
563 u8 opcode;
564 u16 oper16;
565#elif defined(__LITTLE_ENDIAN)
566 u16 oper16;
567 u8 opcode;
568 u8 flags;
569#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
570#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
571#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
572#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
573#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
574#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
575#endif
576 u32 cid;
577 u32 reserved2[6];
578};
579
580#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 000000000000..06380963a34e
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
1/* cnic_if.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_IF_H
13#define CNIC_IF_H
14
15#define CNIC_MODULE_VERSION "2.0.0"
16#define CNIC_MODULE_RELDATE "May 21, 2009"
17
18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2
21#define MAX_CNIC_ULP_TYPE_EXT 2
22#define MAX_CNIC_ULP_TYPE 3
23
24struct kwqe {
25 u32 kwqe_op_flag;
26
27#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
31
32 u32 kwqe_info0;
33 u32 kwqe_info1;
34 u32 kwqe_info2;
35 u32 kwqe_info3;
36 u32 kwqe_info4;
37 u32 kwqe_info5;
38 u32 kwqe_info6;
39};
40
41struct kwqe_16 {
42 u32 kwqe_info0;
43 u32 kwqe_info1;
44 u32 kwqe_info2;
45 u32 kwqe_info3;
46};
47
48struct kcqe {
49 u32 kcqe_info0;
50 u32 kcqe_info1;
51 u32 kcqe_info2;
52 u32 kcqe_info3;
53 u32 kcqe_info4;
54 u32 kcqe_info5;
55 u32 kcqe_info6;
56 u32 kcqe_op_flag;
57 #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
58 #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
59 #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
60 #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
61 #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
65 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16)
68 #define KCQE_OPCODE(op) \
69 (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
70};
71
72#define MAX_CNIC_CTL_DATA 64
73#define MAX_DRV_CTL_DATA 64
74
75#define CNIC_CTL_STOP_CMD 1
76#define CNIC_CTL_START_CMD 2
77#define CNIC_CTL_COMPLETION_CMD 3
78
79#define DRV_CTL_IO_WR_CMD 0x101
80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105
84
85struct cnic_ctl_completion {
86 u32 cid;
87};
88
89struct drv_ctl_completion {
90 u32 comp_count;
91};
92
93struct cnic_ctl_info {
94 int cmd;
95 union {
96 struct cnic_ctl_completion comp;
97 char bytes[MAX_CNIC_CTL_DATA];
98 } data;
99};
100
101struct drv_ctl_io {
102 u32 cid_addr;
103 u32 offset;
104 u32 data;
105 dma_addr_t dma_addr;
106};
107
108struct drv_ctl_info {
109 int cmd;
110 union {
111 struct drv_ctl_completion comp;
112 struct drv_ctl_io io;
113 char bytes[MAX_DRV_CTL_DATA];
114 } data;
115};
116
117struct cnic_ops {
118 struct module *cnic_owner;
119 /* Calls to these functions are protected by RCU. When
120 * unregistering, we wait for any calls to complete before
121 * continuing.
122 */
123 int (*cnic_handler)(void *, void *);
124 int (*cnic_ctl)(void *, struct cnic_ctl_info *);
125};
126
127#define MAX_CNIC_VEC 8
128
129struct cnic_irq {
130 unsigned int vector;
131 void *status_blk;
132 u32 status_blk_num;
133 u32 irq_flags;
134#define CNIC_IRQ_FL_MSIX 0x00000001
135};
136
137struct cnic_eth_dev {
138 struct module *drv_owner;
139 u32 drv_state;
140#define CNIC_DRV_STATE_REGD 0x00000001
141#define CNIC_DRV_STATE_USING_MSIX 0x00000002
142 u32 chip_id;
143 u32 max_kwqe_pending;
144 struct pci_dev *pdev;
145 void __iomem *io_base;
146
147 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len;
149 int ctx_blk_size;
150 u32 starting_cid;
151 u32 max_iscsi_conn;
152 u32 max_fcoe_conn;
153 u32 max_rdma_conn;
154 u32 reserved0[2];
155
156 int num_irq;
157 struct cnic_irq irq_arr[MAX_CNIC_VEC];
158 int (*drv_register_cnic)(struct net_device *,
159 struct cnic_ops *, void *);
160 int (*drv_unregister_cnic)(struct net_device *);
161 int (*drv_submit_kwqes_32)(struct net_device *,
162 struct kwqe *[], u32);
163 int (*drv_submit_kwqes_16)(struct net_device *,
164 struct kwqe_16 *[], u32);
165 int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
166 unsigned long reserved1[2];
167};
168
169struct cnic_sockaddr {
170 union {
171 struct sockaddr_in v4;
172 struct sockaddr_in6 v6;
173 } local;
174 union {
175 struct sockaddr_in v4;
176 struct sockaddr_in6 v6;
177 } remote;
178};
179
180struct cnic_sock {
181 struct cnic_dev *dev;
182 void *context;
183 u32 src_ip[4];
184 u32 dst_ip[4];
185 u16 src_port;
186 u16 dst_port;
187 u16 vlan_id;
188 unsigned char old_ha[6];
189 unsigned char ha[6];
190 u32 mtu;
191 u32 cid;
192 u32 l5_cid;
193 u32 pg_cid;
194 int ulp_type;
195
196 u32 ka_timeout;
197 u32 ka_interval;
198 u8 ka_max_probe_count;
199 u8 tos;
200 u8 ttl;
201 u8 snd_seq_scale;
202 u32 rcv_buf;
203 u32 snd_buf;
204 u32 seed;
205
206 unsigned long tcp_flags;
207#define SK_TCP_NO_DELAY_ACK 0x1
208#define SK_TCP_KEEP_ALIVE 0x2
209#define SK_TCP_NAGLE 0x4
210#define SK_TCP_TIMESTAMP 0x8
211#define SK_TCP_SACK 0x10
212#define SK_TCP_SEG_SCALING 0x20
213 unsigned long flags;
214#define SK_F_INUSE 0
215#define SK_F_OFFLD_COMPLETE 1
216#define SK_F_OFFLD_SCHED 2
217#define SK_F_PG_OFFLD_COMPLETE 3
218#define SK_F_CONNECT_START 4
219#define SK_F_IPV6 5
220#define SK_F_CLOSING 7
221
222 atomic_t ref_count;
223 u32 state;
224 struct kwqe kwqe1;
225 struct kwqe kwqe2;
226 struct kwqe kwqe3;
227};
228
229struct cnic_dev {
230 struct net_device *netdev;
231 struct pci_dev *pcidev;
232 void __iomem *regview;
233 struct list_head list;
234
235 int (*register_device)(struct cnic_dev *dev, int ulp_type,
236 void *ulp_ctx);
237 int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
238 int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
239 u32 num_wqes);
240 int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
241 u32 num_wqes);
242
243 int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
244 void *);
245 int (*cm_destroy)(struct cnic_sock *);
246 int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
247 int (*cm_abort)(struct cnic_sock *);
248 int (*cm_close)(struct cnic_sock *);
249 struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
250 int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
251 char *data, u16 data_size);
252 unsigned long flags;
253#define CNIC_F_CNIC_UP 1
254#define CNIC_F_BNX2_CLASS 3
255#define CNIC_F_BNX2X_CLASS 4
256 atomic_t ref_count;
257 u8 mac_addr[6];
258
259 int max_iscsi_conn;
260 int max_fcoe_conn;
261 int max_rdma_conn;
262
263 void *cnic_priv;
264};
265
266#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
267#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
268#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
269#define CNIC_RD(dev, off) readl(dev->regview + off)
270#define CNIC_RD16(dev, off) readw(dev->regview + off)
271
272struct cnic_ulp_ops {
273 /* Calls to these functions are protected by RCU. When
274 * unregistering, we wait for any calls to complete before
275 * continuing.
276 */
277
278 void (*cnic_init)(struct cnic_dev *dev);
279 void (*cnic_exit)(struct cnic_dev *dev);
280 void (*cnic_start)(void *ulp_ctx);
281 void (*cnic_stop)(void *ulp_ctx);
282 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
283 u32 num_cqes);
284 void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
285 void (*cm_connect_complete)(struct cnic_sock *);
286 void (*cm_close_complete)(struct cnic_sock *);
287 void (*cm_abort_complete)(struct cnic_sock *);
288 void (*cm_remote_close)(struct cnic_sock *);
289 void (*cm_remote_abort)(struct cnic_sock *);
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size);
292 struct module *owner;
293};
294
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
296
297extern int cnic_unregister_driver(int ulp_type);
298
299#endif
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d1d47953fc6..7fa620ddeb21 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -845,6 +845,10 @@ static int virtnet_probe(struct virtio_device *vdev)
845 int err; 845 int err;
846 struct net_device *dev; 846 struct net_device *dev;
847 struct virtnet_info *vi; 847 struct virtnet_info *vi;
848 struct virtqueue *vqs[3];
849 vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
850 const char *names[] = { "input", "output", "control" };
851 int nvqs;
848 852
849 /* Allocate ourselves a network device with room for our info */ 853 /* Allocate ourselves a network device with room for our info */
850 dev = alloc_etherdev(sizeof(struct virtnet_info)); 854 dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -905,25 +909,19 @@ static int virtnet_probe(struct virtio_device *vdev)
905 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 909 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
906 vi->mergeable_rx_bufs = true; 910 vi->mergeable_rx_bufs = true;
907 911
908 /* We expect two virtqueues, receive then send. */ 912 /* We expect two virtqueues, receive then send,
909 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); 913 * and optionally control. */
910 if (IS_ERR(vi->rvq)) { 914 nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
911 err = PTR_ERR(vi->rvq); 915
916 err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
917 if (err)
912 goto free; 918 goto free;
913 }
914 919
915 vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); 920 vi->rvq = vqs[0];
916 if (IS_ERR(vi->svq)) { 921 vi->svq = vqs[1];
917 err = PTR_ERR(vi->svq);
918 goto free_recv;
919 }
920 922
921 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 923 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
922 vi->cvq = vdev->config->find_vq(vdev, 2, NULL); 924 vi->cvq = vqs[2];
923 if (IS_ERR(vi->cvq)) {
924 err = PTR_ERR(vi->svq);
925 goto free_send;
926 }
927 925
928 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 926 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
929 dev->features |= NETIF_F_HW_VLAN_FILTER; 927 dev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -941,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev)
941 err = register_netdev(dev); 939 err = register_netdev(dev);
942 if (err) { 940 if (err) {
943 pr_debug("virtio_net: registering device failed\n"); 941 pr_debug("virtio_net: registering device failed\n");
944 goto free_ctrl; 942 goto free_vqs;
945 } 943 }
946 944
947 /* Last of all, set up some receive buffers. */ 945 /* Last of all, set up some receive buffers. */
@@ -962,13 +960,8 @@ static int virtnet_probe(struct virtio_device *vdev)
962 960
963unregister: 961unregister:
964 unregister_netdev(dev); 962 unregister_netdev(dev);
965free_ctrl: 963free_vqs:
966 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 964 vdev->config->del_vqs(vdev);
967 vdev->config->del_vq(vi->cvq);
968free_send:
969 vdev->config->del_vq(vi->svq);
970free_recv:
971 vdev->config->del_vq(vi->rvq);
972free: 965free:
973 free_netdev(dev); 966 free_netdev(dev);
974 return err; 967 return err;
@@ -994,12 +987,10 @@ static void virtnet_remove(struct virtio_device *vdev)
994 987
995 BUG_ON(vi->num != 0); 988 BUG_ON(vi->num != 0);
996 989
997 vdev->config->del_vq(vi->svq);
998 vdev->config->del_vq(vi->rvq);
999 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
1000 vdev->config->del_vq(vi->cvq);
1001 unregister_netdev(vi->dev); 990 unregister_netdev(vi->dev);
1002 991
992 vdev->config->del_vqs(vi->vdev);
993
1003 while (vi->pages) 994 while (vi->pages)
1004 __free_pages(get_a_page(vi, GFP_KERNEL), 0); 995 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
1005 996
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index f821dbc952a4..27f3b81333de 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -1,21 +1,21 @@
1config OF_DEVICE 1config OF_DEVICE
2 def_bool y 2 def_bool y
3 depends on OF && (SPARC || PPC_OF) 3 depends on OF && (SPARC || PPC_OF || MICROBLAZE)
4 4
5config OF_GPIO 5config OF_GPIO
6 def_bool y 6 def_bool y
7 depends on OF && PPC_OF && GPIOLIB 7 depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB
8 help 8 help
9 OpenFirmware GPIO accessors 9 OpenFirmware GPIO accessors
10 10
11config OF_I2C 11config OF_I2C
12 def_tristate I2C 12 def_tristate I2C
13 depends on PPC_OF && I2C 13 depends on (PPC_OF || MICROBLAZE) && I2C
14 help 14 help
15 OpenFirmware I2C accessors 15 OpenFirmware I2C accessors
16 16
17config OF_SPI 17config OF_SPI
18 def_tristate SPI 18 def_tristate SPI
19 depends on OF && PPC_OF && SPI 19 depends on OF && (PPC_OF || MICROBLAZE) && SPI
20 help 20 help
21 OpenFirmware SPI accessors 21 OpenFirmware SPI accessors
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index f604061d2bb0..ba9765427886 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -638,6 +638,24 @@ int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start,
638} 638}
639EXPORT_SYMBOL(pnp_possible_config); 639EXPORT_SYMBOL(pnp_possible_config);
640 640
641int pnp_range_reserved(resource_size_t start, resource_size_t end)
642{
643 struct pnp_dev *dev;
644 struct pnp_resource *pnp_res;
645 resource_size_t *dev_start, *dev_end;
646
647 pnp_for_each_dev(dev) {
648 list_for_each_entry(pnp_res, &dev->resources, list) {
649 dev_start = &pnp_res->res.start;
650 dev_end = &pnp_res->res.end;
651 if (ranged_conflict(&start, &end, dev_start, dev_end))
652 return 1;
653 }
654 }
655 return 0;
656}
657EXPORT_SYMBOL(pnp_range_reserved);
658
641/* format is: pnp_reserve_irq=irq1[,irq2] .... */ 659/* format is: pnp_reserve_irq=irq1[,irq2] .... */
642static int __init pnp_setup_reserve_irq(char *str) 660static int __init pnp_setup_reserve_irq(char *str)
643{ 661{
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index cbc8566fab70..e38e5d306faf 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq)
173 * this device and sets it up. 173 * this device and sets it up.
174 */ 174 */
175static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, 175static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
176 unsigned index, 176 unsigned index,
177 void (*callback)(struct virtqueue *vq)) 177 void (*callback)(struct virtqueue *vq),
178 const char *name)
178{ 179{
179 struct kvm_device *kdev = to_kvmdev(vdev); 180 struct kvm_device *kdev = to_kvmdev(vdev);
180 struct kvm_vqconfig *config; 181 struct kvm_vqconfig *config;
@@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
194 195
195 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, 196 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
196 vdev, (void *) config->address, 197 vdev, (void *) config->address,
197 kvm_notify, callback); 198 kvm_notify, callback, name);
198 if (!vq) { 199 if (!vq) {
199 err = -ENOMEM; 200 err = -ENOMEM;
200 goto unmap; 201 goto unmap;
@@ -226,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq)
226 KVM_S390_VIRTIO_RING_ALIGN)); 227 KVM_S390_VIRTIO_RING_ALIGN));
227} 228}
228 229
230static void kvm_del_vqs(struct virtio_device *vdev)
231{
232 struct virtqueue *vq, *n;
233
234 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
235 kvm_del_vq(vq);
236}
237
238static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
239 struct virtqueue *vqs[],
240 vq_callback_t *callbacks[],
241 const char *names[])
242{
243 struct kvm_device *kdev = to_kvmdev(vdev);
244 int i;
245
246 /* We must have this many virtqueues. */
247 if (nvqs > kdev->desc->num_vq)
248 return -ENOENT;
249
250 for (i = 0; i < nvqs; ++i) {
251 vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
252 if (IS_ERR(vqs[i]))
253 goto error;
254 }
255 return 0;
256
257error:
258 kvm_del_vqs(vdev);
259 return PTR_ERR(vqs[i]);
260}
261
229/* 262/*
230 * The config ops structure as defined by virtio config 263 * The config ops structure as defined by virtio config
231 */ 264 */
@@ -237,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
237 .get_status = kvm_get_status, 270 .get_status = kvm_get_status,
238 .set_status = kvm_set_status, 271 .set_status = kvm_set_status,
239 .reset = kvm_reset, 272 .reset = kvm_reset,
240 .find_vq = kvm_find_vq, 273 .find_vqs = kvm_find_vqs,
241 .del_vq = kvm_del_vq, 274 .del_vqs = kvm_del_vqs,
242}; 275};
243 276
244/* 277/*
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf6285..b2fe5cdbcaee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -11,6 +11,24 @@
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13 13
14#define ZFCP_MODEL_PRIV 0x4
15
16static struct ccw_device_id zfcp_ccw_device_id[] = {
17 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
18 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
19 {},
20};
21MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
22
23/**
24 * zfcp_ccw_priv_sch - check if subchannel is privileged
25 * @adapter: Adapter/Subchannel to check
26 */
27int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
28{
29 return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
30}
31
14/** 32/**
15 * zfcp_ccw_probe - probe function of zfcp driver 33 * zfcp_ccw_probe - probe function of zfcp driver
16 * @ccw_device: pointer to belonging ccw device 34 * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
176 "ccnoti4", NULL); 194 "ccnoti4", NULL);
177 break; 195 break;
178 case CIO_BOXED: 196 case CIO_BOXED:
179 dev_warn(&adapter->ccw_device->dev, 197 dev_warn(&adapter->ccw_device->dev, "The FCP device "
180 "The ccw device did not respond in time.\n"); 198 "did not respond within the specified time\n");
181 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 199 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
182 break; 200 break;
183 } 201 }
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
199 up(&zfcp_data.config_sema); 217 up(&zfcp_data.config_sema);
200} 218}
201 219
202static struct ccw_device_id zfcp_ccw_device_id[] = {
203 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
204 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
205 {},
206};
207
208MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
209
210static struct ccw_driver zfcp_ccw_driver = { 220static struct ccw_driver zfcp_ccw_driver = {
211 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
212 .name = "zfcp", 222 .name = "zfcp",
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d018..b99b87ce5a39 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
163 } 163 }
164 164
165 response->fsf_command = fsf_req->fsf_command; 165 response->fsf_command = fsf_req->fsf_command;
166 response->fsf_reqid = (unsigned long)fsf_req; 166 response->fsf_reqid = fsf_req->req_id;
167 response->fsf_seqno = fsf_req->seq_no; 167 response->fsf_seqno = fsf_req->seq_no;
168 response->fsf_issued = fsf_req->issued; 168 response->fsf_issued = fsf_req->issued;
169 response->fsf_prot_status = qtcb->prefix.prot_status; 169 response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
737 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 737 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
738 memset(r, 0, sizeof(*r)); 738 memset(r, 0, sizeof(*r));
739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
740 r->fsf_reqid = (unsigned long)fsf_req; 740 r->fsf_reqid = fsf_req->req_id;
741 r->fsf_seqno = fsf_req->seq_no; 741 r->fsf_seqno = fsf_req->seq_no;
742 r->s_id = fc_host_port_id(adapter->scsi_host); 742 r->s_id = fc_host_port_id(adapter->scsi_host);
743 r->d_id = wka_port->d_id; 743 r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
773 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 773 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
774 memset(r, 0, sizeof(*r)); 774 memset(r, 0, sizeof(*r));
775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
776 r->fsf_reqid = (unsigned long)fsf_req; 776 r->fsf_reqid = fsf_req->req_id;
777 r->fsf_seqno = fsf_req->seq_no; 777 r->fsf_seqno = fsf_req->seq_no;
778 r->s_id = wka_port->d_id; 778 r->s_id = wka_port->d_id;
779 r->d_id = fc_host_port_id(adapter->scsi_host); 779 r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
803 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 803 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
804 memset(rec, 0, sizeof(*rec)); 804 memset(rec, 0, sizeof(*rec));
805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
806 rec->fsf_reqid = (unsigned long)fsf_req; 806 rec->fsf_reqid = fsf_req->req_id;
807 rec->fsf_seqno = fsf_req->seq_no; 807 rec->fsf_seqno = fsf_req->seq_no;
808 rec->s_id = s_id; 808 rec->s_id = s_id;
809 rec->d_id = d_id; 809 rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
965 ZFCP_DBF_SCSI_FCP_SNS_INFO); 965 ZFCP_DBF_SCSI_FCP_SNS_INFO);
966 } 966 }
967 967
968 rec->fsf_reqid = (unsigned long)fsf_req; 968 rec->fsf_reqid = fsf_req->req_id;
969 rec->fsf_seqno = fsf_req->seq_no; 969 rec->fsf_seqno = fsf_req->seq_no;
970 rec->fsf_issued = fsf_req->issued; 970 rec->fsf_issued = fsf_req->issued;
971 } 971 }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f0..2074d45dbf6c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -47,13 +47,6 @@
47 47
48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
49 49
50/* Adapter Identification Parameters */
51#define ZFCP_CONTROL_UNIT_TYPE 0x1731
52#define ZFCP_CONTROL_UNIT_MODEL 0x03
53#define ZFCP_DEVICE_TYPE 0x1732
54#define ZFCP_DEVICE_MODEL 0x03
55#define ZFCP_DEVICE_MODEL_PRIV 0x04
56
57/* DMQ bug workaround: don't use last SBALE */ 50/* DMQ bug workaround: don't use last SBALE */
58#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 51#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
59 52
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a64..e50ea465bc2b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
880 zfcp_port_put(port); 880 zfcp_port_put(port);
881 return ZFCP_ERP_CONTINUES; 881 return ZFCP_ERP_CONTINUES;
882 } 882 }
883 /* fall through */
883 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 884 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
884 if (!port->d_id) 885 if (!port->d_id)
885 return ZFCP_ERP_FAILED; 886 return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
894 act->step = ZFCP_ERP_STEP_PORT_CLOSING; 895 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
895 return ZFCP_ERP_CONTINUES; 896 return ZFCP_ERP_CONTINUES;
896 } 897 }
897 /* fall through otherwise */
898 } 898 }
899 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
900 port->d_id = 0;
901 _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
902 return ZFCP_ERP_EXIT;
903 }
904 /* fall through otherwise */
899 } 905 }
900 return ZFCP_ERP_FAILED; 906 return ZFCP_ERP_FAILED;
901} 907}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548c..120a9a1c81f7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
27 27
28/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); 31extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
31 32
32/* zfcp_cfdc.c */ 33/* zfcp_cfdc.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047c..bb2752b4130f 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
150 struct zfcp_port *port; 150 struct zfcp_port *port;
151 151
152 read_lock_irqsave(&zfcp_data.config_lock, flags); 152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) 153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
154 if ((port->d_id & range) == (elem->nport_did & range)) 154 if ((port->d_id & range) == (elem->nport_did & range))
155 zfcp_test_link(port); 155 zfcp_test_link(port);
156 if (!port->d_id)
157 zfcp_erp_port_reopen(port,
158 ZFCP_STATUS_COMMON_ERP_FAILED,
159 "fcrscn1", NULL);
160 }
156 161
157 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 162 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
158} 163}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba8..e6dae3744e79 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
526 break; 526 break;
527 case FSF_TOPO_AL: 527 case FSF_TOPO_AL:
528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
529 /* fall through */
529 default: 530 default:
530 dev_err(&adapter->ccw_device->dev, 531 dev_err(&adapter->ccw_device->dev,
531 "Unknown or unsupported arbitrated loop " 532 "Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
897 switch (fsq->word[0]) { 898 switch (fsq->word[0]) {
898 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 899 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
899 zfcp_test_link(unit->port); 900 zfcp_test_link(unit->port);
901 /* fall through */
900 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 902 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
901 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 903 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
902 break; 904 break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
993 break; 995 break;
994 case FSF_PORT_HANDLE_NOT_VALID: 996 case FSF_PORT_HANDLE_NOT_VALID:
995 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 997 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
998 /* fall through */
996 case FSF_GENERIC_COMMAND_REJECTED: 999 case FSF_GENERIC_COMMAND_REJECTED:
997 case FSF_PAYLOAD_SIZE_MISMATCH: 1000 case FSF_PAYLOAD_SIZE_MISMATCH:
998 case FSF_REQUEST_SIZE_TOO_LARGE: 1001 case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1399 struct fsf_plogi *plogi; 1402 struct fsf_plogi *plogi;
1400 1403
1401 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1404 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1402 return; 1405 goto out;
1403 1406
1404 switch (header->fsf_status) { 1407 switch (header->fsf_status) {
1405 case FSF_PORT_ALREADY_OPEN: 1408 case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1461 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1464 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1462 break; 1465 break;
1463 } 1466 }
1467
1468out:
1469 zfcp_port_put(port);
1464} 1470}
1465 1471
1466/** 1472/**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1473 struct qdio_buffer_element *sbale; 1479 struct qdio_buffer_element *sbale;
1474 struct zfcp_adapter *adapter = erp_action->adapter; 1480 struct zfcp_adapter *adapter = erp_action->adapter;
1475 struct zfcp_fsf_req *req; 1481 struct zfcp_fsf_req *req;
1482 struct zfcp_port *port = erp_action->port;
1476 int retval = -EIO; 1483 int retval = -EIO;
1477 1484
1478 spin_lock_bh(&adapter->req_q_lock); 1485 spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1493 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1500 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1494 1501
1495 req->handler = zfcp_fsf_open_port_handler; 1502 req->handler = zfcp_fsf_open_port_handler;
1496 req->qtcb->bottom.support.d_id = erp_action->port->d_id; 1503 req->qtcb->bottom.support.d_id = port->d_id;
1497 req->data = erp_action->port; 1504 req->data = port;
1498 req->erp_action = erp_action; 1505 req->erp_action = erp_action;
1499 erp_action->fsf_req = req; 1506 erp_action->fsf_req = req;
1507 zfcp_port_get(port);
1500 1508
1501 zfcp_fsf_start_erp_timer(req); 1509 zfcp_fsf_start_erp_timer(req);
1502 retval = zfcp_fsf_req_send(req); 1510 retval = zfcp_fsf_req_send(req);
1503 if (retval) { 1511 if (retval) {
1504 zfcp_fsf_req_free(req); 1512 zfcp_fsf_req_free(req);
1505 erp_action->fsf_req = NULL; 1513 erp_action->fsf_req = NULL;
1514 zfcp_port_put(port);
1506 } 1515 }
1507out: 1516out:
1508 spin_unlock_bh(&adapter->req_q_lock); 1517 spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1590 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1599 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1591 dev_warn(&req->adapter->ccw_device->dev, 1600 dev_warn(&req->adapter->ccw_device->dev,
1592 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1601 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1602 /* fall through */
1593 case FSF_ADAPTER_STATUS_AVAILABLE: 1603 case FSF_ADAPTER_STATUS_AVAILABLE:
1594 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1604 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1605 /* fall through */
1595 case FSF_ACCESS_DENIED: 1606 case FSF_ACCESS_DENIED:
1596 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1607 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1597 break; 1608 break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1876 1887
1877 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && 1888 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1878 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && 1889 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1879 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { 1890 !zfcp_ccw_priv_sch(adapter)) {
1880 exclusive = (bottom->lun_access_info & 1891 exclusive = (bottom->lun_access_info &
1881 FSF_UNIT_ACCESS_EXCLUSIVE); 1892 FSF_UNIT_ACCESS_EXCLUSIVE);
1882 readwrite = (bottom->lun_access_info & 1893 readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2314{ 2325{
2315 struct zfcp_fsf_req *req; 2326 struct zfcp_fsf_req *req;
2316 struct fcp_cmnd_iu *fcp_cmnd_iu; 2327 struct fcp_cmnd_iu *fcp_cmnd_iu;
2317 unsigned int sbtype; 2328 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2318 int real_bytes, retval = -EIO; 2329 int real_bytes, retval = -EIO;
2319 struct zfcp_adapter *adapter = unit->port->adapter; 2330 struct zfcp_adapter *adapter = unit->port->adapter;
2320 2331
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2356 switch (scsi_cmnd->sc_data_direction) { 2367 switch (scsi_cmnd->sc_data_direction) {
2357 case DMA_NONE: 2368 case DMA_NONE:
2358 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2369 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2359 sbtype = SBAL_FLAGS0_TYPE_READ;
2360 break; 2370 break;
2361 case DMA_FROM_DEVICE: 2371 case DMA_FROM_DEVICE:
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2372 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2363 sbtype = SBAL_FLAGS0_TYPE_READ;
2364 fcp_cmnd_iu->rddata = 1; 2373 fcp_cmnd_iu->rddata = 1;
2365 break; 2374 break;
2366 case DMA_TO_DEVICE: 2375 case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2369 fcp_cmnd_iu->wddata = 1; 2378 fcp_cmnd_iu->wddata = 1;
2370 break; 2379 break;
2371 case DMA_BIDIRECTIONAL: 2380 case DMA_BIDIRECTIONAL:
2372 default:
2373 retval = -EIO;
2374 goto failed_scsi_cmnd; 2381 goto failed_scsi_cmnd;
2375 } 2382 }
2376 2383
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2394 scsi_sglist(scsi_cmnd), 2401 scsi_sglist(scsi_cmnd),
2395 FSF_MAX_SBALS_PER_REQ); 2402 FSF_MAX_SBALS_PER_REQ);
2396 if (unlikely(real_bytes < 0)) { 2403 if (unlikely(real_bytes < 0)) {
2397 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) 2404 if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2398 retval = -EIO;
2399 else {
2400 dev_err(&adapter->ccw_device->dev, 2405 dev_err(&adapter->ccw_device->dev,
2401 "Oversize data package, unit 0x%016Lx " 2406 "Oversize data package, unit 0x%016Lx "
2402 "on port 0x%016Lx closed\n", 2407 "on port 0x%016Lx closed\n",
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fbf..7d0da230eb63 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,10 @@
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14 14
15static unsigned int default_depth = 32;
16module_param_named(queue_depth, default_depth, uint, 0600);
17MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
18
15/* Find start of Sense Information in FCP response unit*/ 19/* Find start of Sense Information in FCP response unit*/
16char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 20char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
17{ 21{
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
24 return fcp_sns_info_ptr; 28 return fcp_sns_info_ptr;
25} 29}
26 30
31static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
32{
33 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
34 return sdev->queue_depth;
35}
36
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 37static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
28{ 38{
29 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 39 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
34static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 44static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
35{ 45{
36 if (sdp->tagged_supported) 46 if (sdp->tagged_supported)
37 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); 47 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
38 else 48 else
39 scsi_adjust_queue_depth(sdp, 0, 1); 49 scsi_adjust_queue_depth(sdp, 0, 1);
40 return 0; 50 return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
647 .name = "zfcp", 657 .name = "zfcp",
648 .module = THIS_MODULE, 658 .module = THIS_MODULE,
649 .proc_name = "zfcp", 659 .proc_name = "zfcp",
660 .change_queue_depth = zfcp_scsi_change_queue_depth,
650 .slave_alloc = zfcp_scsi_slave_alloc, 661 .slave_alloc = zfcp_scsi_slave_alloc,
651 .slave_configure = zfcp_scsi_slave_configure, 662 .slave_configure = zfcp_scsi_slave_configure,
652 .slave_destroy = zfcp_scsi_slave_destroy, 663 .slave_destroy = zfcp_scsi_slave_destroy,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b68..6a19ed9a1194 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
191 it has an enclosure device. Selecting this option will just allow 191 it has an enclosure device. Selecting this option will just allow
192 certain enclosure conditions to be reported and is not required. 192 certain enclosure conditions to be reported and is not required.
193 193
194comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
195 depends on SCSI
196
197config SCSI_MULTI_LUN 194config SCSI_MULTI_LUN
198 bool "Probe all LUNs on each SCSI device" 195 bool "Probe all LUNs on each SCSI device"
199 depends on SCSI 196 depends on SCSI
200 help 197 help
201 If you have a SCSI device that supports more than one LUN (Logical 198 Some devices support more than one LUN (Logical Unit Number) in order
202 Unit Number), e.g. a CD jukebox, and only one LUN is detected, you 199 to allow access to several media, e.g. CD jukebox, USB card reader,
203 can say Y here to force the SCSI driver to probe for multiple LUNs. 200 mobile phone in mass storage mode. This option forces the kernel to
204 A SCSI device with multiple LUNs acts logically like multiple SCSI 201 probe for all LUNs by default. This setting can be overriden by
205 devices. The vast majority of SCSI devices have only one LUN, and 202 max_luns boot/module parameter. Note that this option does not affect
206 so most people can say N here. The max_luns boot/module parameter 203 devices conforming to SCSI-3 or higher as they can explicitely report
207 allows to override this setting. 204 their number of LUNs. It is safe to say Y here unless you have one of
205 those rare devices which reacts in an unexpected way when probed for
206 multiple LUNs.
208 207
209config SCSI_CONSTANTS 208config SCSI_CONSTANTS
210 bool "Verbose SCSI error reporting (kernel size +=12K)" 209 bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
355 http://open-iscsi.org 354 http://open-iscsi.org
356 355
357source "drivers/scsi/cxgb3i/Kconfig" 356source "drivers/scsi/cxgb3i/Kconfig"
357source "drivers/scsi/bnx2i/Kconfig"
358 358
359config SGIWD93_SCSI 359config SGIWD93_SCSI
360 tristate "SGI WD93C93 SCSI Driver" 360 tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
508 508
509source "drivers/scsi/aic7xxx/Kconfig.aic79xx" 509source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
510source "drivers/scsi/aic94xx/Kconfig" 510source "drivers/scsi/aic94xx/Kconfig"
511source "drivers/scsi/mvsas/Kconfig"
511 512
512config SCSI_DPT_I2O 513config SCSI_DPT_I2O
513 tristate "Adaptec I2O RAID support " 514 tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
1050 1051
1051 Generally, saying N is fine. 1052 Generally, saying N is fine.
1052 1053
1053config SCSI_MVSAS
1054 tristate "Marvell 88SE6440 SAS/SATA support"
1055 depends on PCI && SCSI
1056 select SCSI_SAS_LIBSAS
1057 help
1058 This driver supports Marvell SAS/SATA PCI devices.
1059
1060 To compiler this driver as a module, choose M here: the module
1061 will be called mvsas.
1062
1063config SCSI_NCR53C406A 1054config SCSI_NCR53C406A
1064 tristate "NCR53c406a SCSI support" 1055 tristate "NCR53c406a SCSI support"
1065 depends on ISA && SCSI 1056 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a5049cfb40ed..25429ea63d0a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
126obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ 126obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
127obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 127obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
128obj-$(CONFIG_SCSI_STEX) += stex.o 128obj-$(CONFIG_SCSI_STEX) += stex.o
129obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 129obj-$(CONFIG_SCSI_MVSAS) += mvsas/
130obj-$(CONFIG_PS3_ROM) += ps3rom.o 130obj-$(CONFIG_PS3_ROM) += ps3rom.o
131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
132 133
133obj-$(CONFIG_ARM) += arm/ 134obj-$(CONFIG_ARM) += arm/
134 135
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d8458684..1cdf09a4779a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
224 return ret; 224 return ret;
225} 225}
226 226
227static int 227static irqreturn_t
228NCR_D700_intr(int irq, void *data) 228NCR_D700_intr(int irq, void *data)
229{ 229{
230 struct NCR_D700_private *p = (struct NCR_D700_private *)data; 230 struct NCR_D700_private *p = (struct NCR_D700_private *)data;
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 000000000000..2fceb19eb27b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_
13
14/**
15* This file defines HSI constants for the iSCSI flows
16*/
17
18/* iSCSI request op codes */
19#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
20
21/* iSCSI response/messages op codes */
22#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
23#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
24
25/* iSCSI task types */
26#define ISCSI_TASK_TYPE_READ (0)
27#define ISCSI_TASK_TYPE_WRITE (1)
28#define ISCSI_TASK_TYPE_MPATH (2)
29
30/* initial CQ sequence numbers */
31#define ISCSI_INITIAL_SN (1)
32
33/* KWQ (kernel work queue) layer codes */
34#define ISCSI_KWQE_LAYER_CODE (6)
35
36/* KWQ (kernel work queue) request op codes */
37#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
38#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
39#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
40#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
41#define ISCSI_KWQE_OPCODE_INIT1 (4)
42#define ISCSI_KWQE_OPCODE_INIT2 (5)
43
44/* KCQ (kernel completion queue) response op codes */
45#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
46#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
47#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
48#define ISCSI_KCQE_OPCODE_INIT (0x14)
49#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
50#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
51#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
52#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
53#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
54#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
55#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
56
57/* KCQ (kernel completion queue) completion status */
58#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
59#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
60#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
61#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
62#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
63
64#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
65#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
66
67#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
68#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
69#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
70#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
71#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
72
73/* Response */
74#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
75#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
76#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
77#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
78#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
79#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
80#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
81#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
82#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
83#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
84#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
85
86/* Data-In */
87#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
88#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
89#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
90#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
91#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
92#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
93
94/* R2T */
95#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
96#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
97#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
98#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
99#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
100#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
101#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
102#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
103#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
104
105/* TMF */
106#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
107#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
108#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
109#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
110
111/* IP/TCP processing errors: */
112#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
113#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
114#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
115#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
116
117/* iSCSI licensing errors */
118/* general iSCSI license not installed */
119#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122
123/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16)
126#define ISCSI_CQ_DB_SIZE (80)
127
128#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
129
130/* Page size codes (for flags field in connection offload request) */
131#define ISCSI_PAGE_SIZE_256 (0)
132#define ISCSI_PAGE_SIZE_512 (1)
133#define ISCSI_PAGE_SIZE_1K (2)
134#define ISCSI_PAGE_SIZE_2K (3)
135#define ISCSI_PAGE_SIZE_4K (4)
136#define ISCSI_PAGE_SIZE_8K (5)
137#define ISCSI_PAGE_SIZE_16K (6)
138#define ISCSI_PAGE_SIZE_32K (7)
139#define ISCSI_PAGE_SIZE_64K (8)
140#define ISCSI_PAGE_SIZE_128K (9)
141#define ISCSI_PAGE_SIZE_256K (10)
142#define ISCSI_PAGE_SIZE_512K (11)
143#define ISCSI_PAGE_SIZE_1M (12)
144#define ISCSI_PAGE_SIZE_2M (13)
145#define ISCSI_PAGE_SIZE_4M (14)
146#define ISCSI_PAGE_SIZE_8M (15)
147
148/* Iscsi PDU related defines */
149#define ISCSI_HEADER_SIZE (48)
150#define ISCSI_DIGEST_SHIFT (2)
151#define ISCSI_DIGEST_SIZE (4)
152
153#define B577XX_ISCSI_CONNECTION_TYPE 3
154
155#endif /*__57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 000000000000..36af1afef9b6
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__
13
14/*
15 * iSCSI Async CQE
16 */
17struct bnx2i_async_msg {
18#if defined(__BIG_ENDIAN)
19 u8 op_code;
20 u8 reserved1;
21 u16 reserved0;
22#elif defined(__LITTLE_ENDIAN)
23 u16 reserved0;
24 u8 reserved1;
25 u8 op_code;
26#endif
27 u32 reserved2;
28 u32 exp_cmd_sn;
29 u32 max_cmd_sn;
30 u32 reserved3[2];
31#if defined(__BIG_ENDIAN)
32 u16 reserved5;
33 u8 err_code;
34 u8 reserved4;
35#elif defined(__LITTLE_ENDIAN)
36 u8 reserved4;
37 u8 err_code;
38 u16 reserved5;
39#endif
40 u32 reserved6;
41 u32 lun[2];
42#if defined(__BIG_ENDIAN)
43 u8 async_event;
44 u8 async_vcode;
45 u16 param1;
46#elif defined(__LITTLE_ENDIAN)
47 u16 param1;
48 u8 async_vcode;
49 u8 async_event;
50#endif
51#if defined(__BIG_ENDIAN)
52 u16 param2;
53 u16 param3;
54#elif defined(__LITTLE_ENDIAN)
55 u16 param3;
56 u16 param2;
57#endif
58 u32 reserved7[3];
59 u32 cq_req_sn;
60};
61
62
63/*
64 * iSCSI Buffer Descriptor (BD)
65 */
66struct iscsi_bd {
67 u32 buffer_addr_hi;
68 u32 buffer_addr_lo;
69#if defined(__BIG_ENDIAN)
70 u16 reserved0;
71 u16 buffer_length;
72#elif defined(__LITTLE_ENDIAN)
73 u16 buffer_length;
74 u16 reserved0;
75#endif
76#if defined(__BIG_ENDIAN)
77 u16 reserved3;
78 u16 flags;
79#define ISCSI_BD_RESERVED1 (0x3F<<0)
80#define ISCSI_BD_RESERVED1_SHIFT 0
81#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
82#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
83#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
84#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
85#define ISCSI_BD_RESERVED2 (0xFF<<8)
86#define ISCSI_BD_RESERVED2_SHIFT 8
87#elif defined(__LITTLE_ENDIAN)
88 u16 flags;
89#define ISCSI_BD_RESERVED1 (0x3F<<0)
90#define ISCSI_BD_RESERVED1_SHIFT 0
91#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
92#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
93#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
94#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
95#define ISCSI_BD_RESERVED2 (0xFF<<8)
96#define ISCSI_BD_RESERVED2_SHIFT 8
97 u16 reserved3;
98#endif
99};
100
101
102/*
103 * iSCSI Cleanup SQ WQE
104 */
105struct bnx2i_cleanup_request {
106#if defined(__BIG_ENDIAN)
107 u8 op_code;
108 u8 reserved1;
109 u16 reserved0;
110#elif defined(__LITTLE_ENDIAN)
111 u16 reserved0;
112 u8 reserved1;
113 u8 op_code;
114#endif
115 u32 reserved2[3];
116#if defined(__BIG_ENDIAN)
117 u16 reserved3;
118 u16 itt;
119#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
120#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
121#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
122#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
123#elif defined(__LITTLE_ENDIAN)
124 u16 itt;
125#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
126#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
127#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
128#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
129 u16 reserved3;
130#endif
131 u32 reserved4[10];
132#if defined(__BIG_ENDIAN)
133 u8 cq_index;
134 u8 reserved6;
135 u16 reserved5;
136#elif defined(__LITTLE_ENDIAN)
137 u16 reserved5;
138 u8 reserved6;
139 u8 cq_index;
140#endif
141};
142
143
144/*
145 * iSCSI Cleanup CQE
146 */
147struct bnx2i_cleanup_response {
148#if defined(__BIG_ENDIAN)
149 u8 op_code;
150 u8 status;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 status;
155 u8 op_code;
156#endif
157 u32 reserved1[3];
158 u32 reserved2[2];
159#if defined(__BIG_ENDIAN)
160 u16 reserved4;
161 u8 err_code;
162 u8 reserved3;
163#elif defined(__LITTLE_ENDIAN)
164 u8 reserved3;
165 u8 err_code;
166 u16 reserved4;
167#endif
168 u32 reserved5[7];
169#if defined(__BIG_ENDIAN)
170 u16 reserved6;
171 u16 itt;
172#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
173#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
174#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
175#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
176#elif defined(__LITTLE_ENDIAN)
177 u16 itt;
178#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
179#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
180#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
181#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
182 u16 reserved6;
183#endif
184 u32 cq_req_sn;
185};
186
187
188/*
189 * SCSI read/write SQ WQE
190 */
191struct bnx2i_cmd_request {
192#if defined(__BIG_ENDIAN)
193 u8 op_code;
194 u8 op_attr;
195#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
196#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
197#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
198#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
199#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
200#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
201#define ISCSI_CMD_REQUEST_READ (0x1<<6)
202#define ISCSI_CMD_REQUEST_READ_SHIFT 6
203#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
204#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
205 u16 reserved0;
206#elif defined(__LITTLE_ENDIAN)
207 u16 reserved0;
208 u8 op_attr;
209#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
210#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
211#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
212#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
213#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
214#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
215#define ISCSI_CMD_REQUEST_READ (0x1<<6)
216#define ISCSI_CMD_REQUEST_READ_SHIFT 6
217#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
218#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
219 u8 op_code;
220#endif
221#if defined(__BIG_ENDIAN)
222 u16 ud_buffer_offset;
223 u16 sd_buffer_offset;
224#elif defined(__LITTLE_ENDIAN)
225 u16 sd_buffer_offset;
226 u16 ud_buffer_offset;
227#endif
228 u32 lun[2];
229#if defined(__BIG_ENDIAN)
230 u16 reserved2;
231 u16 itt;
232#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
233#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
234#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
235#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
236#elif defined(__LITTLE_ENDIAN)
237 u16 itt;
238#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
239#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
240#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
241#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
242 u16 reserved2;
243#endif
244 u32 total_data_transfer_length;
245 u32 cmd_sn;
246 u32 reserved3;
247 u32 cdb[4];
248 u32 zero_fill;
249 u32 bd_list_addr_lo;
250 u32 bd_list_addr_hi;
251#if defined(__BIG_ENDIAN)
252 u8 cq_index;
253 u8 sd_start_bd_index;
254 u8 ud_start_bd_index;
255 u8 num_bds;
256#elif defined(__LITTLE_ENDIAN)
257 u8 num_bds;
258 u8 ud_start_bd_index;
259 u8 sd_start_bd_index;
260 u8 cq_index;
261#endif
262};
263
264
265/*
266 * task statistics for write response
267 */
268struct bnx2i_write_resp_task_stat {
269 u32 num_data_ins;
270};
271
272/*
273 * task statistics for read response
274 */
275struct bnx2i_read_resp_task_stat {
276#if defined(__BIG_ENDIAN)
277 u16 num_data_outs;
278 u16 num_r2ts;
279#elif defined(__LITTLE_ENDIAN)
280 u16 num_r2ts;
281 u16 num_data_outs;
282#endif
283};
284
285/*
286 * task statistics for iSCSI cmd response
287 */
288union bnx2i_cmd_resp_task_stat {
289 struct bnx2i_write_resp_task_stat write_stat;
290 struct bnx2i_read_resp_task_stat read_stat;
291};
292
293/*
294 * SCSI Command CQE
295 */
296struct bnx2i_cmd_response {
297#if defined(__BIG_ENDIAN)
298 u8 op_code;
299 u8 response_flags;
300#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
301#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
302#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
303#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
304#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
305#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
306#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
307#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
308#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
309#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
310#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
311#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
312 u8 response;
313 u8 status;
314#elif defined(__LITTLE_ENDIAN)
315 u8 status;
316 u8 response;
317 u8 response_flags;
318#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
319#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
320#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
321#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
322#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
323#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
324#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
325#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
326#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
327#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
328#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
329#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
330 u8 op_code;
331#endif
332 u32 data_length;
333 u32 exp_cmd_sn;
334 u32 max_cmd_sn;
335 u32 reserved2;
336 u32 residual_count;
337#if defined(__BIG_ENDIAN)
338 u16 reserved4;
339 u8 err_code;
340 u8 reserved3;
341#elif defined(__LITTLE_ENDIAN)
342 u8 reserved3;
343 u8 err_code;
344 u16 reserved4;
345#endif
346 u32 reserved5[5];
347 union bnx2i_cmd_resp_task_stat task_stat;
348 u32 reserved6;
349#if defined(__BIG_ENDIAN)
350 u16 reserved7;
351 u16 itt;
352#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
353#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
354#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
355#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
356#elif defined(__LITTLE_ENDIAN)
357 u16 itt;
358#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
359#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
360#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
361#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
362 u16 reserved7;
363#endif
364 u32 cq_req_sn;
365};
366
367
368
369/*
370 * firmware middle-path request SQ WQE
371 */
372struct bnx2i_fw_mp_request {
373#if defined(__BIG_ENDIAN)
374 u8 op_code;
375 u8 op_attr;
376 u16 hdr_opaque1;
377#elif defined(__LITTLE_ENDIAN)
378 u16 hdr_opaque1;
379 u8 op_attr;
380 u8 op_code;
381#endif
382 u32 data_length;
383 u32 hdr_opaque2[2];
384#if defined(__BIG_ENDIAN)
385 u16 reserved0;
386 u16 itt;
387#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
388#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
389#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
390#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
391#elif defined(__LITTLE_ENDIAN)
392 u16 itt;
393#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
394#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
395#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
396#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
397 u16 reserved0;
398#endif
399 u32 hdr_opaque3[4];
400 u32 resp_bd_list_addr_lo;
401 u32 resp_bd_list_addr_hi;
402 u32 resp_buffer;
403#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
404#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
405#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
406#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
407#if defined(__BIG_ENDIAN)
408 u16 reserved4;
409 u8 reserved3;
410 u8 flags;
411#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
412#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
413#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
414#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
415#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
416#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
417#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
418#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
419#elif defined(__LITTLE_ENDIAN)
420 u8 flags;
421#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
422#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
423#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
424#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
425#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
426#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
427#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
428#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
429 u8 reserved3;
430 u16 reserved4;
431#endif
432 u32 bd_list_addr_lo;
433 u32 bd_list_addr_hi;
434#if defined(__BIG_ENDIAN)
435 u8 cq_index;
436 u8 reserved6;
437 u8 reserved5;
438 u8 num_bds;
439#elif defined(__LITTLE_ENDIAN)
440 u8 num_bds;
441 u8 reserved5;
442 u8 reserved6;
443 u8 cq_index;
444#endif
445};
446
447
448/*
449 * firmware response - CQE: used only by firmware
450 */
451struct bnx2i_fw_response {
452 u32 hdr_dword1[2];
453 u32 hdr_exp_cmd_sn;
454 u32 hdr_max_cmd_sn;
455 u32 hdr_ttt;
456 u32 hdr_res_cnt;
457 u32 cqe_flags;
458#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
459#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
460#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
461#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
462#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
463#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
464 u32 stat_sn;
465 u32 hdr_dword2[2];
466 u32 hdr_dword3[2];
467 u32 task_stat;
468 u32 reserved0;
469 u32 hdr_itt;
470 u32 cq_req_sn;
471};
472
473
474/*
475 * iSCSI KCQ CQE parameters
476 */
477union iscsi_kcqe_params {
478 u32 reserved0[4];
479};
480
481/*
482 * iSCSI KCQ CQE
483 */
484struct iscsi_kcqe {
485 u32 iscsi_conn_id;
486 u32 completion_status;
487 u32 iscsi_conn_context_id;
488 union iscsi_kcqe_params params;
489#if defined(__BIG_ENDIAN)
490 u8 flags;
491#define ISCSI_KCQE_RESERVED0 (0xF<<0)
492#define ISCSI_KCQE_RESERVED0_SHIFT 0
493#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
494#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
495#define ISCSI_KCQE_RESERVED1 (0x1<<7)
496#define ISCSI_KCQE_RESERVED1_SHIFT 7
497 u8 op_code;
498 u16 qe_self_seq;
499#elif defined(__LITTLE_ENDIAN)
500 u16 qe_self_seq;
501 u8 op_code;
502 u8 flags;
503#define ISCSI_KCQE_RESERVED0 (0xF<<0)
504#define ISCSI_KCQE_RESERVED0_SHIFT 0
505#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
506#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
507#define ISCSI_KCQE_RESERVED1 (0x1<<7)
508#define ISCSI_KCQE_RESERVED1_SHIFT 7
509#endif
510};
511
512
513
514/*
515 * iSCSI KWQE header
516 */
517struct iscsi_kwqe_header {
518#if defined(__BIG_ENDIAN)
519 u8 flags;
520#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
521#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
522#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
523#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
524#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
525#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
526 u8 op_code;
527#elif defined(__LITTLE_ENDIAN)
528 u8 op_code;
529 u8 flags;
530#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
531#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
532#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
533#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
534#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
535#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
536#endif
537};
538
539/*
540 * iSCSI firmware init request 1
541 */
542struct iscsi_kwqe_init1 {
543#if defined(__BIG_ENDIAN)
544 struct iscsi_kwqe_header hdr;
545 u8 reserved0;
546 u8 num_cqs;
547#elif defined(__LITTLE_ENDIAN)
548 u8 num_cqs;
549 u8 reserved0;
550 struct iscsi_kwqe_header hdr;
551#endif
552 u32 dummy_buffer_addr_lo;
553 u32 dummy_buffer_addr_hi;
554#if defined(__BIG_ENDIAN)
555 u16 num_ccells_per_conn;
556 u16 num_tasks_per_conn;
557#elif defined(__LITTLE_ENDIAN)
558 u16 num_tasks_per_conn;
559 u16 num_ccells_per_conn;
560#endif
561#if defined(__BIG_ENDIAN)
562 u16 sq_wqes_per_page;
563 u16 sq_num_wqes;
564#elif defined(__LITTLE_ENDIAN)
565 u16 sq_num_wqes;
566 u16 sq_wqes_per_page;
567#endif
568#if defined(__BIG_ENDIAN)
569 u8 cq_log_wqes_per_page;
570 u8 flags;
571#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
572#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
573#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
574#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
575#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
576#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
577#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
578#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
579 u16 cq_num_wqes;
580#elif defined(__LITTLE_ENDIAN)
581 u16 cq_num_wqes;
582 u8 flags;
583#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
584#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
585#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
586#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
587#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
588#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
589#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
590#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
591 u8 cq_log_wqes_per_page;
592#endif
593#if defined(__BIG_ENDIAN)
594 u16 cq_num_pages;
595 u16 sq_num_pages;
596#elif defined(__LITTLE_ENDIAN)
597 u16 sq_num_pages;
598 u16 cq_num_pages;
599#endif
600#if defined(__BIG_ENDIAN)
601 u16 rq_buffer_size;
602 u16 rq_num_wqes;
603#elif defined(__LITTLE_ENDIAN)
604 u16 rq_num_wqes;
605 u16 rq_buffer_size;
606#endif
607};
608
609/*
610 * iSCSI firmware init request 2
611 */
612struct iscsi_kwqe_init2 {
613#if defined(__BIG_ENDIAN)
614 struct iscsi_kwqe_header hdr;
615 u16 max_cq_sqn;
616#elif defined(__LITTLE_ENDIAN)
617 u16 max_cq_sqn;
618 struct iscsi_kwqe_header hdr;
619#endif
620 u32 error_bit_map[2];
621 u32 reserved1[5];
622};
623
624/*
625 * Initial iSCSI connection offload request 1
626 */
627struct iscsi_kwqe_conn_offload1 {
628#if defined(__BIG_ENDIAN)
629 struct iscsi_kwqe_header hdr;
630 u16 iscsi_conn_id;
631#elif defined(__LITTLE_ENDIAN)
632 u16 iscsi_conn_id;
633 struct iscsi_kwqe_header hdr;
634#endif
635 u32 sq_page_table_addr_lo;
636 u32 sq_page_table_addr_hi;
637 u32 cq_page_table_addr_lo;
638 u32 cq_page_table_addr_hi;
639 u32 reserved0[3];
640};
641
642/*
643 * iSCSI Page Table Entry (PTE)
644 */
645struct iscsi_pte {
646 u32 hi;
647 u32 lo;
648};
649
650/*
651 * Initial iSCSI connection offload request 2
652 */
653struct iscsi_kwqe_conn_offload2 {
654#if defined(__BIG_ENDIAN)
655 struct iscsi_kwqe_header hdr;
656 u16 reserved0;
657#elif defined(__LITTLE_ENDIAN)
658 u16 reserved0;
659 struct iscsi_kwqe_header hdr;
660#endif
661 u32 rq_page_table_addr_lo;
662 u32 rq_page_table_addr_hi;
663 struct iscsi_pte sq_first_pte;
664 struct iscsi_pte cq_first_pte;
665 u32 num_additional_wqes;
666};
667
668
669/*
670 * Initial iSCSI connection offload request 3
671 */
672struct iscsi_kwqe_conn_offload3 {
673#if defined(__BIG_ENDIAN)
674 struct iscsi_kwqe_header hdr;
675 u16 reserved0;
676#elif defined(__LITTLE_ENDIAN)
677 u16 reserved0;
678 struct iscsi_kwqe_header hdr;
679#endif
680 u32 reserved1;
681 struct iscsi_pte qp_first_pte[3];
682};
683
684
685/*
686 * iSCSI connection update request
687 */
688struct iscsi_kwqe_conn_update {
689#if defined(__BIG_ENDIAN)
690 struct iscsi_kwqe_header hdr;
691 u16 reserved0;
692#elif defined(__LITTLE_ENDIAN)
693 u16 reserved0;
694 struct iscsi_kwqe_header hdr;
695#endif
696#if defined(__BIG_ENDIAN)
697 u8 session_error_recovery_level;
698 u8 max_outstanding_r2ts;
699 u8 reserved2;
700 u8 conn_flags;
701#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
702#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
703#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
704#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
705#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
706#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
707#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
708#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
709#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
710#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
711#elif defined(__LITTLE_ENDIAN)
712 u8 conn_flags;
713#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
714#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
715#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
716#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
717#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
718#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
719#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
720#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
721#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
722#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
723 u8 reserved2;
724 u8 max_outstanding_r2ts;
725 u8 session_error_recovery_level;
726#endif
727 u32 context_id;
728 u32 max_send_pdu_length;
729 u32 max_recv_pdu_length;
730 u32 first_burst_length;
731 u32 max_burst_length;
732 u32 exp_stat_sn;
733};
734
735/*
736 * iSCSI destroy connection request
737 */
738struct iscsi_kwqe_conn_destroy {
739#if defined(__BIG_ENDIAN)
740 struct iscsi_kwqe_header hdr;
741 u16 reserved0;
742#elif defined(__LITTLE_ENDIAN)
743 u16 reserved0;
744 struct iscsi_kwqe_header hdr;
745#endif
746 u32 context_id;
747 u32 reserved1[6];
748};
749
750/*
751 * iSCSI KWQ WQE
752 */
753union iscsi_kwqe {
754 struct iscsi_kwqe_init1 init1;
755 struct iscsi_kwqe_init2 init2;
756 struct iscsi_kwqe_conn_offload1 conn_offload1;
757 struct iscsi_kwqe_conn_offload2 conn_offload2;
758 struct iscsi_kwqe_conn_update conn_update;
759 struct iscsi_kwqe_conn_destroy conn_destroy;
760};
761
762/*
763 * iSCSI Login SQ WQE
764 */
765struct bnx2i_login_request {
766#if defined(__BIG_ENDIAN)
767 u8 op_code;
768 u8 op_attr;
769#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
770#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
771#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
772#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
773#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
774#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
775#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
776#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
777#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
778#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
779 u8 version_max;
780 u8 version_min;
781#elif defined(__LITTLE_ENDIAN)
782 u8 version_min;
783 u8 version_max;
784 u8 op_attr;
785#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
786#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
787#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
788#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
789#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
790#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
791#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
792#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
793#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
794#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
795 u8 op_code;
796#endif
797 u32 data_length;
798 u32 isid_lo;
799#if defined(__BIG_ENDIAN)
800 u16 isid_hi;
801 u16 tsih;
802#elif defined(__LITTLE_ENDIAN)
803 u16 tsih;
804 u16 isid_hi;
805#endif
806#if defined(__BIG_ENDIAN)
807 u16 reserved2;
808 u16 itt;
809#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
810#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
811#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
812#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
813#elif defined(__LITTLE_ENDIAN)
814 u16 itt;
815#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
816#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
817#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
818#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
819 u16 reserved2;
820#endif
821#if defined(__BIG_ENDIAN)
822 u16 cid;
823 u16 reserved3;
824#elif defined(__LITTLE_ENDIAN)
825 u16 reserved3;
826 u16 cid;
827#endif
828 u32 cmd_sn;
829 u32 exp_stat_sn;
830 u32 reserved4;
831 u32 resp_bd_list_addr_lo;
832 u32 resp_bd_list_addr_hi;
833 u32 resp_buffer;
834#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
835#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
836#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
837#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
838#if defined(__BIG_ENDIAN)
839 u16 reserved8;
840 u8 reserved7;
841 u8 flags;
842#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
843#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
844#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
845#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
846#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
847#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
848#elif defined(__LITTLE_ENDIAN)
849 u8 flags;
850#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
851#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
852#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
853#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
854#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
855#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
856 u8 reserved7;
857 u16 reserved8;
858#endif
859 u32 bd_list_addr_lo;
860 u32 bd_list_addr_hi;
861#if defined(__BIG_ENDIAN)
862 u8 cq_index;
863 u8 reserved10;
864 u8 reserved9;
865 u8 num_bds;
866#elif defined(__LITTLE_ENDIAN)
867 u8 num_bds;
868 u8 reserved9;
869 u8 reserved10;
870 u8 cq_index;
871#endif
872};
873
874
/*
 * iSCSI Login CQE
 *
 * Completion queue entry posted by the firmware for an iSCSI Login
 * Response PDU.  The two preprocessor branches describe the same
 * little-endian wire image from big- and little-endian hosts, which is
 * why the member order is mirrored between them.
 */
struct bnx2i_login_response {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 response_flags;
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
	u8 version_max;
	u8 version_active;
#elif defined(__LITTLE_ENDIAN)
	u8 version_active;
	u8 version_max;
	u8 response_flags;
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 reserved1[2];
#if defined(__BIG_ENDIAN)
	u16 reserved3;
	u8 err_code;
	u8 reserved2;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved2;
	u8 err_code;
	u16 reserved3;
#endif
	u32 stat_sn;
	u32 isid_lo;
#if defined(__BIG_ENDIAN)
	u16 isid_hi;
	u16 tsih;
#elif defined(__LITTLE_ENDIAN)
	u16 tsih;
	u16 isid_hi;
#endif
#if defined(__BIG_ENDIAN)
	u8 status_class;
	u8 status_detail;
	u16 reserved4;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved4;
	u8 status_detail;
	u8 status_class;
#endif
	u32 reserved5[3];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved6;
	u16 itt;
#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
	u16 reserved6;
#endif
	u32 cq_req_sn;
};
959
960
/*
 * iSCSI Logout SQ WQE
 *
 * Send-queue work-queue entry used to issue an iSCSI Logout Request.
 * Member order is mirrored across the endian branches so the in-memory
 * image matches the little-endian layout expected by the chip.
 */
struct bnx2i_logout_request {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 op_attr;
#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_attr;
#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 reserved1[2];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved2;
	u16 itt;
#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
	u16 reserved2;
#endif
#if defined(__BIG_ENDIAN)
	u16 cid;
	u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved3;
	u16 cid;
#endif
	u32 cmd_sn;
	u32 reserved4[5];
	u32 zero_fill;
	u32 bd_list_addr_lo;
	u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
	u8 cq_index;
	u8 reserved6;
	u8 reserved5;
	u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
	u8 num_bds;
	u8 reserved5;
	u8 reserved6;
	u8 cq_index;
#endif
};
1023
1024
/*
 * iSCSI Logout CQE
 *
 * Completion queue entry posted for an iSCSI Logout Response PDU.
 * Member order is mirrored across the endian branches.
 */
struct bnx2i_logout_response {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 reserved1;
	u8 response;
	u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved0;
	u8 response;
	u8 reserved1;
	u8 op_code;
#endif
	u32 reserved2;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 reserved3[2];
#if defined(__BIG_ENDIAN)
	u16 reserved5;
	u8 err_code;
	u8 reserved4;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved4;
	u8 err_code;
	u16 reserved5;
#endif
	u32 reserved6[3];
	/* Time2Wait / Time2Retain values from the Logout Response PDU */
#if defined(__BIG_ENDIAN)
	u16 time_to_wait;
	u16 time_to_retain;
#elif defined(__LITTLE_ENDIAN)
	u16 time_to_retain;
	u16 time_to_wait;
#endif
	u32 reserved7[3];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved8;
	u16 itt;
#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
	u16 reserved8;
#endif
	u32 cq_req_sn;
};
1079
1080
/*
 * iSCSI Nop-In CQE
 *
 * Completion queue entry posted for an iSCSI NOP-In PDU (either a
 * target-initiated ping or the reply to a NOP-Out).  Member order is
 * mirrored across the endian branches.
 */
struct bnx2i_nop_in_msg {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 reserved1;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 reserved1;
	u8 op_code;
#endif
	u32 data_length;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 ttt;
	u32 reserved2;
#if defined(__BIG_ENDIAN)
	u16 reserved4;
	u8 err_code;
	u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved3;
	u8 err_code;
	u16 reserved4;
#endif
	u32 reserved5;
	u32 lun[2];
	u32 reserved6[4];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved7;
	u16 itt;
#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
	u16 reserved7;
#endif
	u32 cq_req_sn;
};
1128
1129
/*
 * iSCSI NOP-OUT SQ WQE
 *
 * Send-queue work-queue entry used to issue an iSCSI NOP-Out PDU.
 * Carries both the request BD list and a response buffer/BD list for
 * the expected NOP-In reply.  Member order is mirrored across the
 * endian branches.
 */
struct bnx2i_nop_out_request {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 op_attr;
#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_attr;
#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 lun[2];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved2;
	u16 itt;
#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
	u16 reserved2;
#endif
	u32 ttt;
	u32 cmd_sn;
	u32 reserved3[2];
	/* resp_buffer packs a 24-bit length and an 8-bit BD count */
	u32 resp_bd_list_addr_lo;
	u32 resp_bd_list_addr_hi;
	u32 resp_buffer;
#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
#if defined(__BIG_ENDIAN)
	u16 reserved7;
	u8 reserved6;
	u8 flags;
#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
#elif defined(__LITTLE_ENDIAN)
	u8 flags;
#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
	u8 reserved6;
	u16 reserved7;
#endif
	u32 bd_list_addr_lo;
	u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
	u8 cq_index;
	u8 reserved9;
	u8 reserved8;
	u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
	u8 num_bds;
	u8 reserved8;
	u8 reserved9;
	u8 cq_index;
#endif
};
1213
/*
 * iSCSI Reject CQE
 *
 * Completion queue entry posted for an iSCSI Reject PDU.  Member order
 * is mirrored across the endian branches.
 */
struct bnx2i_reject_msg {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 reserved1;
	u8 reason;
	u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved0;
	u8 reason;
	u8 reserved1;
	u8 op_code;
#endif
	u32 data_length;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 reserved2[2];
#if defined(__BIG_ENDIAN)
	u16 reserved4;
	u8 err_code;
	u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved3;
	u8 err_code;
	u16 reserved4;
#endif
	u32 reserved5[8];
	u32 cq_req_sn;
};
1245
/*
 * bnx2i iSCSI TMF SQ WQE
 *
 * Send-queue work-queue entry used to issue an iSCSI Task Management
 * Function request.  Member order is mirrored across the endian
 * branches.
 */
struct bnx2i_tmf_request {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 op_attr;
#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_attr;
#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 lun[2];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved1;
	u16 itt;
#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
	u16 reserved1;
#endif
	/* referenced task tag / CmdSN of the task being managed */
	u32 ref_itt;
	u32 cmd_sn;
	u32 reserved2;
	u32 ref_cmd_sn;
	u32 reserved3[3];
	u32 zero_fill;
	u32 bd_list_addr_lo;
	u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
	u8 cq_index;
	u8 reserved5;
	u8 reserved4;
	u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
	u8 num_bds;
	u8 reserved4;
	u8 reserved5;
	u8 cq_index;
#endif
};
1304
/*
 * iSCSI Text SQ WQE
 *
 * Send-queue work-queue entry used to issue an iSCSI Text Request.
 * Carries both the request BD list and a response buffer/BD list for
 * the Text Response.  Member order is mirrored across the endian
 * branches.
 */
struct bnx2i_text_request {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 op_attr;
#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_attr;
#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 lun[2];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved3;
	u16 itt;
#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
	u16 reserved3;
#endif
	u32 ttt;
	u32 cmd_sn;
	u32 reserved4[2];
	/* resp_buffer packs a 24-bit length and an 8-bit BD count */
	u32 resp_bd_list_addr_lo;
	u32 resp_bd_list_addr_hi;
	u32 resp_buffer;
#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
	u32 zero_fill;
	u32 bd_list_addr_lo;
	u32 bd_list_addr_hi;
#if defined(__BIG_ENDIAN)
	u8 cq_index;
	u8 reserved7;
	u8 reserved6;
	u8 num_bds;
#elif defined(__LITTLE_ENDIAN)
	u8 num_bds;
	u8 reserved6;
	u8 reserved7;
	u8 cq_index;
#endif
};
1372
/*
 * iSCSI SQ WQE
 *
 * Union over every send-queue WQE layout; one SQ slot holds exactly one
 * of these request types.
 */
union iscsi_request {
	struct bnx2i_cmd_request cmd;
	struct bnx2i_tmf_request tmf;
	struct bnx2i_nop_out_request nop_out;
	struct bnx2i_login_request login_req;
	struct bnx2i_text_request text;
	struct bnx2i_logout_request logout_req;
	struct bnx2i_cleanup_request cleanup;
};
1385
1386
/*
 * iSCSI TMF CQE
 *
 * Completion queue entry posted for an iSCSI Task Management Function
 * response.  Member order is mirrored across the endian branches.
 */
struct bnx2i_tmf_response {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 reserved1;
	u8 response;
	u8 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved0;
	u8 response;
	u8 reserved1;
	u8 op_code;
#endif
	u32 reserved2;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 reserved3[2];
#if defined(__BIG_ENDIAN)
	u16 reserved5;
	u8 err_code;
	u8 reserved4;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved4;
	u8 err_code;
	u16 reserved5;
#endif
	u32 reserved6[7];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved7;
	u16 itt;
#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
	u16 reserved7;
#endif
	u32 cq_req_sn;
};
1433
/*
 * iSCSI Text CQE
 *
 * Completion queue entry posted for an iSCSI Text Response PDU.
 * Member order is mirrored across the endian branches.
 */
struct bnx2i_text_response {
#if defined(__BIG_ENDIAN)
	u8 op_code;
	u8 response_flags;
#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 response_flags;
#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
	u8 op_code;
#endif
	u32 data_length;
	u32 exp_cmd_sn;
	u32 max_cmd_sn;
	u32 ttt;
	u32 reserved2;
#if defined(__BIG_ENDIAN)
	u16 reserved4;
	u8 err_code;
	u8 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u8 reserved3;
	u8 err_code;
	u16 reserved4;
#endif
	u32 reserved5;
	u32 lun[2];
	u32 reserved6[4];
	/* itt packs a 14-bit task-table index and a 2-bit type code */
#if defined(__BIG_ENDIAN)
	u16 reserved7;
	u16 itt;
#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
	u16 itt;
#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
	u16 reserved7;
#endif
	u32 cq_req_sn;
};
1493
/*
 * iSCSI CQE
 *
 * Union over every completion-queue-entry layout; one CQ slot holds
 * exactly one of these response types.
 */
union iscsi_response {
	struct bnx2i_cmd_response cmd;
	struct bnx2i_tmf_response tmf;
	struct bnx2i_login_response login_resp;
	struct bnx2i_text_response text;
	struct bnx2i_logout_response logout_resp;
	struct bnx2i_cleanup_response cleanup;
	struct bnx2i_reject_msg reject;
	struct bnx2i_async_msg async;
	struct bnx2i_nop_in_msg nop_in;
};
1508
1509#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 000000000000..820d428ae839
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
# Broadcom NetXtreme II iSCSI offload driver (bnx2i); depends on the CNIC
# offload infrastructure shared with the bnx2 networking driver.
config SCSI_BNX2_ISCSI
	tristate "Broadcom NetXtreme II iSCSI support"
	select SCSI_ISCSI_ATTRS
	select CNIC
	---help---
	  This driver supports iSCSI offload for the Broadcom NetXtreme II
	  devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 000000000000..b5802bd2e76a
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
# Makefile for the Broadcom NetXtreme II iSCSI driver (bnx2i.ko)
bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o

obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 000000000000..d7576f28c6e9
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#ifndef _BNX2I_H_
15#define _BNX2I_H_
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/in.h>
26#include <linux/kfifo.h>
27#include <linux/netdevice.h>
28#include <linux/completion.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi.h>
35#include <scsi/iscsi_proto.h>
36#include <scsi/libiscsi.h>
37#include <scsi/scsi_transport_iscsi.h>
38
39#include "../../net/cnic_if.h"
40#include "57xx_iscsi_hsi.h"
41#include "57xx_iscsi_constants.h"
42
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"

#define BNX2I_MAX_ADAPTERS 8

/* per-HBA connection/session/command limits */
#define ISCSI_MAX_CONNS_PER_HBA 128
#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
#define ISCSI_MAX_CMDS_PER_SESS 128

/* Total active commands across all connections supported by devices */
#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))

#define ISCSI_MAX_BDS_PER_CMD 32

#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4

/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
#define BD_SPLIT_SIZE 32768

/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
#define BNX2I_SQ_WQES_MIN 16
#define BNX2I_570X_SQ_WQES_MAX 128
#define BNX2I_5770X_SQ_WQES_MAX 512
#define BNX2I_570X_SQ_WQES_DEFAULT 128
#define BNX2I_5770X_SQ_WQES_DEFAULT 256

#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512

#define BNX2I_RQ_WQES_MIN 16
#define BNX2I_RQ_WQES_MAX 32
#define BNX2I_RQ_WQES_DEFAULT 16

/* CCELLs per conn */
#define BNX2I_CCELLS_MIN 16
#define BNX2I_CCELLS_MAX 96
#define BNX2I_CCELLS_DEFAULT 64

#define ITT_INVALID_SIGNATURE 0xFFFF

/* units: presumably jiffies/msecs — confirm against cleanup timer usage */
#define ISCSI_CMD_CLEANUP_TIMEOUT 100

#define BNX2I_CONN_CTX_BUF_SIZE 16384

/* fixed WQE/CQE sizes, in bytes */
#define BNX2I_SQ_WQE_SIZE 64
#define BNX2I_RQ_WQE_SIZE 256
#define BNX2I_CQE_SIZE 64

#define MB_KERNEL_CTX_SHIFT 8
#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)

#define CTX_SHIFT 7
#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)

#define CTX_OFFSET 0x10000
#define MAX_CID_CNT 0x4000

/* 5709 context registers */
#define BNX2_MQ_CONFIG2 0x00003d00
#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)

/* 57710's BAR2 is mapped to doorbell registers */
#define BNX2X_DOORBELL_PCI_BAR 2
#define BNX2X_MAX_CQS 8

/* arm/disarm values for CQ event notification */
#define CNIC_ARM_CQE 1
#define CNIC_DISARM_CQE 0
/*
 * Register access helpers: 32-bit read/write through the HBA's mapped
 * PCI register window.  Arguments are fully parenthesized so expansion
 * stays correct when callers pass compound expressions (e.g. an offset
 * built from "base + idx").
 */
#define REG_RD(__hba, offset) \
		readl((__hba)->regview + (offset))
#define REG_WR(__hba, offset, val) \
		writel((val), (__hba)->regview + (offset))
119
120
/**
 * struct generic_pdu_resc - login pdu resource structure
 *
 * @req_buf:        driver buffer used to stage payload associated with
 *                  the login request
 * @req_dma_addr:   dma address for iscsi login request payload buffer
 * @req_buf_size:   actual login request payload length
 * @req_wr_ptr:     pointer into login request buffer when next data is
 *                  to be written
 * @resp_hdr:       iscsi header where iscsi login response header is to
 *                  be recreated
 * @resp_buf:       buffer to stage login response payload
 * @resp_dma_addr:  login response payload buffer dma address
 * @resp_buf_size:  login response payload length
 * @resp_wr_ptr:    pointer into login response buffer when next data is
 *                  to be written
 * @req_bd_tbl:     iscsi login request payload BD table
 * @req_bd_dma:     login request BD table dma address
 * @resp_bd_tbl:    iscsi login response payload BD table
 * @resp_bd_dma:    login response BD table dma address
 *
 * following structure defines buffer info for generic pdus such as iSCSI Login,
 * Logout and NOP
 */
struct generic_pdu_resc {
	char *req_buf;
	dma_addr_t req_dma_addr;
	u32 req_buf_size;
	char *req_wr_ptr;
	struct iscsi_hdr resp_hdr;
	char *resp_buf;
	dma_addr_t resp_dma_addr;
	u32 resp_buf_size;
	char *resp_wr_ptr;
	char *req_bd_tbl;
	dma_addr_t req_bd_dma;
	char *resp_bd_tbl;
	dma_addr_t resp_bd_dma;
};
160
161
/**
 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
 *
 * @link:      list head to link elements
 * @max_ptrs:  maximum pointers that can be stored in this page
 * @num_valid: number of pointers valid in this page
 * @page:      base address for page pointer array
 *
 * structure to track DMA'able memory allocated for command BD tables
 *
 * NOTE(review): page[1] is the old-style variable-length trailing array
 * idiom; the allocator presumably over-allocates past sizeof(*this) --
 * converting to a C99 flexible array member would change sizeof and the
 * allocation math, so it is left as-is.
 */
struct bd_resc_page {
	struct list_head link;
	u32 max_ptrs;
	u32 num_valid;
	void *page[1];
};
178
179
/**
 * struct io_bdt - I/O buffer descriptor table
 *
 * @bd_tbl:     BD table's virtual address
 * @bd_tbl_dma: BD table's dma address
 * @bd_valid:   num valid BD entries
 *
 * IO BD table
 */
struct io_bdt {
	struct iscsi_bd *bd_tbl;
	dma_addr_t bd_tbl_dma;
	u16 bd_valid;
};
194
195
/**
 * bnx2i_cmd - iscsi command structure
 *
 * @hdr:        iSCSI PDU header for this command
 * @conn:       owning bnx2i connection
 * @scsi_cmd:   SCSI-ML task pointer corresponding to this iscsi cmd
 * @sg:         SG list
 * @io_tbl:     buffer descriptor (BD) table
 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
 * @req:        firmware SQ WQE image built for this command
 */
struct bnx2i_cmd {
	struct iscsi_hdr hdr;
	struct bnx2i_conn *conn;
	struct scsi_cmnd *scsi_cmd;
	struct scatterlist *sg;
	struct io_bdt io_tbl;
	dma_addr_t bd_tbl_dma;
	struct bnx2i_cmd_request req;
};
213
214
/**
 * struct bnx2i_conn - iscsi connection structure
 *
 * @cls_conn:           pointer to iscsi cls conn
 * @hba:                adapter structure pointer
 * @cmd_cleanup_cmpl:   completion signalled when command-cleanup finishes
 * @is_bound:           set once the connection is bound to an endpoint
 * @iscsi_conn_cid:     iscsi conn id
 * @fw_cid:             firmware iscsi context id
 * @poll_timer:         timer used to poll for completions -- TODO confirm
 *                      exact role against its setup/handler code
 * @ep:                 endpoint structure pointer
 * @gen_pdu:            login/nopout/logout pdu resources
 * @violation_notified: bit mask used to track iscsi error/warning messages
 *                      already printed out
 *
 * iSCSI connection structure
 */
struct bnx2i_conn {
	struct iscsi_cls_conn *cls_conn;
	struct bnx2i_hba *hba;
	struct completion cmd_cleanup_cmpl;
	int is_bound;

	u32 iscsi_conn_cid;
#define BNX2I_CID_RESERVED 0x5AFF
	u32 fw_cid;

	struct timer_list poll_timer;
	/*
	 * Queue Pair (QP) related structure elements.
	 */
	struct bnx2i_endpoint *ep;

	/*
	 * Buffer for login negotiation process
	 */
	struct generic_pdu_resc gen_pdu;
	u64 violation_notified;
};
251
252
253
/**
 * struct iscsi_cid_queue - Per adapter iscsi cid queue
 *
 * @cid_que_base:   queue base memory
 * @cid_que:        queue memory pointer
 * @cid_q_prod_idx: producer index
 * @cid_q_cons_idx: consumer index
 * @cid_q_max_idx:  max index. used to detect wrap around condition
 * @cid_free_cnt:   queue size
 * @conn_cid_tbl:   iscsi cid to conn structure mapping table
 *
 * Per adapter iSCSI CID Queue
 */
struct iscsi_cid_queue {
	void *cid_que_base;
	u32 *cid_que;
	u32 cid_q_prod_idx;
	u32 cid_q_cons_idx;
	u32 cid_q_max_idx;
	u32 cid_free_cnt;
	struct bnx2i_conn **conn_cid_tbl;
};
276
/**
 * struct bnx2i_hba - bnx2i adapter structure
 *
 * @link:                 list head to link elements
 * @cnic:                 pointer to cnic device
 * @pcidev:               pointer to pci dev
 * @netdev:               pointer to netdev structure
 * @regview:              mapped PCI register space
 * @age:                  age, incremented by every recovery
 * @cnic_dev_type:        cnic device type, 5706/5708/5709/57710
 * @mail_queue_access:    mailbox queue access mode, applicable to 5709 only
 * @reg_with_cnic:        indicates whether the device is register with CNIC
 * @adapter_state:        adapter state, UP, GOING_DOWN, LINK_DOWN
 * @mtu_supported:        Ethernet MTU supported
 * @shost:                scsi host pointer
 * @max_sqes:             SQ size
 * @max_rqes:             RQ size
 * @max_cqes:             CQ size
 * @num_ccell:            number of command cells per connection
 * @ofld_conns_active:    active connection list
 * @max_active_conns:     max offload connections supported by this device
 * @cid_que:              iscsi cid queue
 * @ep_rdwr_lock:         read / write lock to synchronize various ep lists
 * @ep_ofld_list:         connection list for pending offload completion
 * @ep_destroy_list:      connection list for pending offload completion
 * @mp_bd_tbl:            BD table to be used with middle path requests
 * @mp_bd_dma:            DMA address of 'mp_bd_tbl' memory buffer
 * @dummy_buffer:         Dummy buffer to be used with zero length scsicmd reqs
 * @dummy_buf_dma:        DMA address of 'dummy_buffer' memory buffer
 * @lock:                 lock to synchronize access to hba structure
 * @pci_did:              PCI device ID
 * @pci_vid:              PCI vendor ID
 * @pci_sdid:             PCI subsystem device ID
 * @pci_svid:             PCI subsystem vendor ID
 * @pci_func:             PCI function number in system pci tree
 * @pci_devno:            PCI device number in system pci tree
 * @num_wqe_sent:         statistic counter, total wqe's sent
 * @num_cqe_rcvd:         statistic counter, total cqe's received
 * @num_intr_claimed:     statistic counter, total interrupts claimed
 * @link_changed_count:   statistic counter, num of link change notifications
 *                        received
 * @ipaddr_changed_count: statistic counter, num times IP address changed while
 *                        at least one connection is offloaded
 * @num_sess_opened:      statistic counter, total num sessions opened
 * @num_conn_opened:      statistic counter, total num conns opened on this hba
 * @ctx_ccell_tasks:      captures number of ccells and tasks supported by
 *                        currently offloaded connection, used to decode
 *                        context memory
 *
 * Adapter Data Structure
 */
struct bnx2i_hba {
	struct list_head link;
	struct cnic_dev *cnic;
	struct pci_dev *pcidev;
	struct net_device *netdev;
	void __iomem *regview;

	u32 age;
	unsigned long cnic_dev_type;
		#define BNX2I_NX2_DEV_5706 0x0
		#define BNX2I_NX2_DEV_5708 0x1
		#define BNX2I_NX2_DEV_5709 0x2
		#define BNX2I_NX2_DEV_57710 0x3
	u32 mail_queue_access;
		#define BNX2I_MQ_KERNEL_MODE 0x0
		#define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
		#define BNX2I_MQ_BIN_MODE 0x2
	unsigned long reg_with_cnic;
		#define BNX2I_CNIC_REGISTERED 1

	unsigned long adapter_state;
		#define ADAPTER_STATE_UP 0
		#define ADAPTER_STATE_GOING_DOWN 1
		#define ADAPTER_STATE_LINK_DOWN 2
		#define ADAPTER_STATE_INIT_FAILED 31
	unsigned int mtu_supported;
		#define BNX2I_MAX_MTU_SUPPORTED 1500

	struct Scsi_Host *shost;

	u32 max_sqes;
	u32 max_rqes;
	u32 max_cqes;
	u32 num_ccell;

	int ofld_conns_active;

	int max_active_conns;
	struct iscsi_cid_queue cid_que;

	rwlock_t ep_rdwr_lock;
	struct list_head ep_ofld_list;
	struct list_head ep_destroy_list;

	/*
	 * BD table to be used with MP (Middle Path) requests.
	 */
	char *mp_bd_tbl;
	dma_addr_t mp_bd_dma;
	char *dummy_buffer;
	dma_addr_t dummy_buf_dma;

	spinlock_t lock;	/* protects hba structure access */
	struct mutex net_dev_lock;/* sync net device access */

	/*
	 * PCI related info.
	 */
	u16 pci_did;
	u16 pci_vid;
	u16 pci_sdid;
	u16 pci_svid;
	u16 pci_func;
	u16 pci_devno;

	/*
	 * Following are a bunch of statistics useful during development
	 * and later stage for score boarding.
	 */
	u32 num_wqe_sent;
	u32 num_cqe_rcvd;
	u32 num_intr_claimed;
	u32 link_changed_count;
	u32 ipaddr_changed_count;
	u32 num_sess_opened;
	u32 num_conn_opened;
	unsigned int ctx_ccell_tasks;
};
406
407
408/*******************************************************************************
409 * QP [ SQ / RQ / CQ ] info.
410 ******************************************************************************/
411
/*
 * SQ/RQ/CQ generic structure definition
 *
 * Opaque byte-sized views of one send-queue, receive-queue and
 * completion-queue entry; sizes are fixed by the firmware interface.
 */
struct sqe {
	u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
};

struct rqe {
	u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
};

struct cqe {
	u8 cqe_byte[BNX2I_CQE_SIZE];
};
426
427
/*
 * Byte offsets of the doorbell/coalescing fields within the context
 * window; the values differ by host endianness because the 16-bit
 * fields sit at different byte addresses in each view.
 */
enum {
#if defined(__LITTLE_ENDIAN)
	CNIC_EVENT_COAL_INDEX	= 0x0,
	CNIC_SEND_DOORBELL	= 0x4,
	CNIC_EVENT_CQ_ARM	= 0x7,
	CNIC_RECV_DOORBELL	= 0x8
#elif defined(__BIG_ENDIAN)
	CNIC_EVENT_COAL_INDEX	= 0x2,
	CNIC_SEND_DOORBELL	= 0x6,
	CNIC_EVENT_CQ_ARM	= 0x4,
	CNIC_RECV_DOORBELL	= 0xa
#endif
};
441
442
/*
 * CQ DB
 *
 * One per-CQ entry of the 57710 completion-queue doorbell block.
 */
struct bnx2x_iscsi_cq_pend_cmpl {
	/* CQ producer, updated by Ustorm */
	u16 ustrom_prod;
	/* CQ pending completion counter */
	u16 pend_cntr;
};
452
453
454struct bnx2i_5771x_cq_db {
455 struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
456 /* CQ pending completion ITT array */
457 u16 itt[BNX2X_MAX_CQS];
458 /* Cstorm CQ sequence to notify array, updated by driver */;
459 u16 sqn[BNX2X_MAX_CQS];
460 u32 reserved[4] /* 16 byte allignment */;
461};
462
463
/* 57710 SQ/RQ doorbell: producer index plus pad to 16 bytes */
struct bnx2i_5771x_sq_rq_db {
	u16 prod_idx;
	u8 reserved0[14]; /* Pad structure size to 16 bytes */
};
468
469
/* 57710 doorbell header byte; bitfields accessed via the masks below */
struct bnx2i_5771x_dbell_hdr {
	u8 header;
	/* 1 for rx doorbell, 0 for tx doorbell */
#define B577XX_DOORBELL_HDR_RX (0x1<<0)
#define B577XX_DOORBELL_HDR_RX_SHIFT 0
	/* 0 for normal doorbell, 1 for advertise wnd doorbell */
#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
	/* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
	/* connection type */
#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
};
485
/* Full 32-bit doorbell message written to the chip by
 * bnx2i_ring_577xx_doorbell(): one header byte plus padding. */
struct bnx2i_5771x_dbell {
	struct bnx2i_5771x_dbell_hdr dbell;
	u8 pad[3];

};
491
/**
 * struct qp_info - QP (shared queue region) attributes structure
 *
 * @ctx_base:           ioremapped pci register base to access doorbell register
 *                      pertaining to this offloaded connection
 * @sq_virt:            virtual address of send queue (SQ) region
 * @sq_phys:            DMA address of SQ memory region
 * @sq_mem_size:        SQ size
 * @sq_prod_qe:         SQ producer entry pointer
 * @sq_cons_qe:         SQ consumer entry pointer
 * @sq_first_qe:        virtual address of first entry in SQ
 * @sq_last_qe:         virtual address of last entry in SQ
 * @sq_prod_idx:        SQ producer index
 * @sq_cons_idx:        SQ consumer index
 * @sqe_left:           number of SQ entries left
 * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
 * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
 * @sq_pgtbl_size:      SQ page table size
 * @cq_virt:            virtual address of completion queue (CQ) region
 * @cq_phys:            DMA address of CQ memory region
 * @cq_mem_size:        CQ size
 * @cq_prod_qe:         CQ producer entry pointer
 * @cq_cons_qe:         CQ consumer entry pointer
 * @cq_first_qe:        virtual address of first entry in CQ
 * @cq_last_qe:         virtual address of last entry in CQ
 * @cq_prod_idx:        CQ producer index
 * @cq_cons_idx:        CQ consumer index
 * @cqe_left:           number of CQ entries left
 * @cqe_size:           size of each CQ entry
 * @cqe_exp_seq_sn:     next expected CQE sequence number
 * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
 * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
 * @cq_pgtbl_size:      CQ page table size
 * @rq_virt:            virtual address of receive queue (RQ) region
 * @rq_phys:            DMA address of RQ memory region
 * @rq_mem_size:        RQ size
 * @rq_prod_qe:         RQ producer entry pointer
 * @rq_cons_qe:         RQ consumer entry pointer
 * @rq_first_qe:        virtual address of first entry in RQ
 * @rq_last_qe:         virtual address of last entry in RQ
 * @rq_prod_idx:        RQ producer index
 * @rq_cons_idx:        RQ consumer index
 * @rqe_left:           number of RQ entries left
 * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
 * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
 * @rq_pgtbl_size:      RQ page table size
 *
 * queue pair (QP) is a per connection shared data structure which is used
 * to send work requests (SQ), receive completion notifications (CQ)
 * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
 * below holds queue memory, consumer/producer indexes and page table
 * information
 */
struct qp_info {
	void __iomem *ctx_base;
#define DPM_TRIGER_TYPE			0x40

#define BNX2I_570x_QUE_DB_SIZE		0
#define BNX2I_5771x_QUE_DB_SIZE		16
	struct sqe *sq_virt;
	dma_addr_t sq_phys;
	u32 sq_mem_size;

	struct sqe *sq_prod_qe;
	struct sqe *sq_cons_qe;
	struct sqe *sq_first_qe;
	struct sqe *sq_last_qe;
	u16 sq_prod_idx;
	u16 sq_cons_idx;
	u32 sqe_left;

	void *sq_pgtbl_virt;
	dma_addr_t sq_pgtbl_phys;
	u32 sq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */

	struct cqe *cq_virt;
	dma_addr_t cq_phys;
	u32 cq_mem_size;

	struct cqe *cq_prod_qe;
	struct cqe *cq_cons_qe;
	struct cqe *cq_first_qe;
	struct cqe *cq_last_qe;
	u16 cq_prod_idx;
	u16 cq_cons_idx;
	u32 cqe_left;
	u32 cqe_size;
	u32 cqe_exp_seq_sn;

	void *cq_pgtbl_virt;
	dma_addr_t cq_pgtbl_phys;
	u32 cq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */

	struct rqe *rq_virt;
	dma_addr_t rq_phys;
	u32 rq_mem_size;

	struct rqe *rq_prod_qe;
	struct rqe *rq_cons_qe;
	struct rqe *rq_first_qe;
	struct rqe *rq_last_qe;
	u16 rq_prod_idx;
	u16 rq_cons_idx;
	u32 rqe_left;

	void *rq_pgtbl_virt;
	dma_addr_t rq_pgtbl_phys;
	u32 rq_pgtbl_size;	/* set to PAGE_SIZE for 5708 & 5709 */
};
601
602
603
/*
 * CID handles
 */
/* Connection identifiers associated with one endpoint: the
 * firmware-assigned context id, the driver-assigned iscsi cid and the
 * port-generation (pg) cid. */
struct ep_handles {
	u32 fw_cid;
	u32 drv_iscsi_cid;
	u16 pg_cid;
	u16 rsvd;
};
613
614
/*
 * Endpoint (offloaded connection) state machine values; see
 * bnx2i_endpoint.state.  Values are distinct bits so that failure
 * states can be distinguished from the in-progress states they
 * correspond to.
 */
enum {
	EP_STATE_IDLE                   = 0x0,
	EP_STATE_PG_OFLD_START          = 0x1,
	EP_STATE_PG_OFLD_COMPL          = 0x2,
	EP_STATE_OFLD_START             = 0x4,
	EP_STATE_OFLD_COMPL             = 0x8,
	EP_STATE_CONNECT_START          = 0x10,
	EP_STATE_CONNECT_COMPL          = 0x20,
	EP_STATE_ULP_UPDATE_START       = 0x40,
	EP_STATE_ULP_UPDATE_COMPL       = 0x80,
	EP_STATE_DISCONN_START          = 0x100,
	EP_STATE_DISCONN_COMPL          = 0x200,
	EP_STATE_CLEANUP_START          = 0x400,
	EP_STATE_CLEANUP_CMPL           = 0x800,
	EP_STATE_TCP_FIN_RCVD           = 0x1000,
	EP_STATE_TCP_RST_RCVD           = 0x2000,
	EP_STATE_PG_OFLD_FAILED         = 0x1000000,
	EP_STATE_ULP_UPDATE_FAILED      = 0x2000000,
	EP_STATE_CLEANUP_FAILED         = 0x4000000,
	EP_STATE_OFLD_FAILED            = 0x8000000,
	EP_STATE_CONNECT_FAILED         = 0x10000000,
	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
};
638
/**
 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
 *
 * @link:               list head to link elements
 * @hba:                adapter to which this connection belongs
 * @conn:               iscsi connection this EP is linked to
 * @cm_sk:              cnic sock struct
 * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
 *                      after HBA reset is completed by bnx2i/cnic/bnx2
 *                      modules
 * @state:              tracks offload connection state machine
 *                      (EP_STATE_* values)
 * @timestamp:          timestamp field; presumably in jiffies -
 *                      TODO confirm against the code that sets it
 * @num_active_cmds:    number of outstanding SQ work requests;
 *                      incremented when the SQ doorbell is rung and
 *                      used to scale CQ event coalescing
 * @qp:                 QP information
 * @ids:                contains chip allocated *context id* & driver assigned
 *                      *iscsi cid*
 * @ofld_timer:         offload timer to detect timeout
 * @ofld_wait:          wait queue
 *
 * Endpoint Structure - equivalent of tcp socket structure
 */
struct bnx2i_endpoint {
	struct list_head link;
	struct bnx2i_hba *hba;
	struct bnx2i_conn *conn;
	struct cnic_sock *cm_sk;
	u32 hba_age;
	u32 state;
	unsigned long timestamp;
	int num_active_cmds;

	struct qp_info qp;
	struct ep_handles ids;
	/* convenience aliases for the three ids */
	#define ep_iscsi_cid	ids.drv_iscsi_cid
	#define ep_cid		ids.fw_cid
	#define ep_pg_cid	ids.pg_cid
	struct timer_list ofld_timer;
	wait_queue_head_t ofld_wait;
};
678
679
680
681/* Global variables */
682extern unsigned int error_mask1, error_mask2;
683extern u64 iscsi_error_mask;
684extern unsigned int en_tcp_dack;
685extern unsigned int event_coal_div;
686
687extern struct scsi_transport_template *bnx2i_scsi_xport_template;
688extern struct iscsi_transport bnx2i_iscsi_transport;
689extern struct cnic_ulp_ops bnx2i_cnic_cb;
690
691extern unsigned int sq_size;
692extern unsigned int rq_size;
693
694extern struct device_attribute *bnx2i_dev_attributes[];
695
696
697
698/*
699 * Function Prototypes
700 */
701extern void bnx2i_identify_device(struct bnx2i_hba *hba);
702extern void bnx2i_register_device(struct bnx2i_hba *hba);
703
704extern void bnx2i_ulp_init(struct cnic_dev *dev);
705extern void bnx2i_ulp_exit(struct cnic_dev *dev);
706extern void bnx2i_start(void *handle);
707extern void bnx2i_stop(void *handle);
708extern void bnx2i_reg_dev_all(void);
709extern void bnx2i_unreg_dev_all(void);
710extern struct bnx2i_hba *get_adapter_list_head(void);
711
712struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
713 u16 iscsi_cid);
714
715int bnx2i_alloc_ep_pool(void);
716void bnx2i_release_ep_pool(void);
717struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
718struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
719
720struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
721
722struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
723void bnx2i_free_hba(struct bnx2i_hba *hba);
724
725void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
726void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
727
728void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
729
730void bnx2i_drop_session(struct iscsi_cls_session *session);
731
732extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
733extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
734 struct iscsi_task *mtask);
735extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
736 struct iscsi_task *mtask);
737extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
738 struct bnx2i_cmd *cmnd);
739extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
740 struct iscsi_task *mtask, u32 ttt,
741 char *datap, int data_len, int unsol);
742extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
743 struct iscsi_task *mtask);
744extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
745 struct bnx2i_cmd *cmd);
746extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
747 struct bnx2i_endpoint *ep);
748extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
749extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
750 struct bnx2i_endpoint *ep);
751
752extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
753 struct bnx2i_endpoint *ep);
754extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
755 struct bnx2i_endpoint *ep);
756extern void bnx2i_ep_ofld_timer(unsigned long data);
757extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
758 struct bnx2i_hba *hba, u32 iscsi_cid);
759extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
760 struct bnx2i_hba *hba, u32 iscsi_cid);
761
762extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
763extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
764
765/* Debug related function prototypes */
766extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
767extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
768extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
769extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
770
771#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 000000000000..906cef5cda86
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h>
16#include "bnx2i.h"
17
18/**
19 * bnx2i_get_cid_num - get cid from ep
20 * @ep: endpoint pointer
21 *
22 * Only applicable to 57710 family of devices
23 */
24static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
25{
26 u32 cid;
27
28 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
29 cid = ep->ep_cid;
30 else
31 cid = GET_CID_NUM(ep->ep_cid);
32 return cid;
33}
34
35
/**
 * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size based on device type
 * @hba: Adapter for which adjustments is to be made
 *
 * Rounds each queue depth so that the queue occupies a whole number
 * of pages; additionally forces power-of-two SQ/RQ depths on
 * 5706/5708/5709 devices.
 */
static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
{
	u32 num_elements_per_pg;

	/* 5706/5708/5709 require SQ and RQ depths to be powers of two */
	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		if (!is_power_of_2(hba->max_sqes))
			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);

		if (!is_power_of_2(hba->max_rqes))
			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
	}

	/* Adjust each queue size if the user selection does not
	 * yield integral num of page buffers
	 */
	/* adjust SQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	if (hba->max_sqes < num_elements_per_pg)
		hba->max_sqes = num_elements_per_pg;
	else if (hba->max_sqes % num_elements_per_pg)
		/* round up to next multiple of entries-per-page; the
		 * mask trick works because entries-per-page is a power
		 * of two (PAGE_SIZE and WQE size both are) */
		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);

	/* adjust CQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
	if (hba->max_cqes < num_elements_per_pg)
		hba->max_cqes = num_elements_per_pg;
	else if (hba->max_cqes % num_elements_per_pg)
		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);

	/* adjust RQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
	if (hba->max_rqes < num_elements_per_pg)
		hba->max_rqes = num_elements_per_pg;
	else if (hba->max_rqes % num_elements_per_pg)
		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);
}
83
84
85/**
86 * bnx2i_get_link_state - get network interface link state
87 * @hba: adapter instance pointer
88 *
89 * updates adapter structure flag based on netdev state
90 */
91static void bnx2i_get_link_state(struct bnx2i_hba *hba)
92{
93 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
94 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
95 else
96 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
97}
98
99
100/**
101 * bnx2i_iscsi_license_error - displays iscsi license related error message
102 * @hba: adapter instance pointer
103 * @error_code: error classification
104 *
105 * Puts out an error log when driver is unable to offload iscsi connection
106 * due to license restrictions
107 */
108static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
109{
110 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
111 /* iSCSI offload not supported on this device */
112 printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
113 hba->netdev->name);
114 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
115 /* iSCSI offload not supported on this LOM device */
116 printk(KERN_ERR "bnx2i: LOM is not enable to "
117 "offload iSCSI connections, dev=%s\n",
118 hba->netdev->name);
119 set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
120}
121
122
/**
 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 * @ep: endpoint (transport indentifier) structure
 * @action: action, ARM or DISARM. For now only ARM_CQE is used
 *
 * Arm'ing CQ will enable chip to generate global EQ events inorder to
 * interrupt the driver. EQ event is generated CQ index is hit or at
 * least 1 CQ is outstanding and on chip timer expires
 */
void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
	struct bnx2i_5771x_cq_db *cq_db;
	u16 cq_index;

	/* only the 57710 family uses this host-memory CQ doorbell */
	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		return;

	if (action == CNIC_ARM_CQE) {
		/* spread EQ notifications out under load: next notify
		 * index is pushed ahead by num_active_cmds/event_coal_div */
		cq_index = ep->qp.cqe_exp_seq_sn +
			   ep->num_active_cmds / event_coal_div;
		cq_index %= (ep->qp.cqe_size * 2 + 1);
		/* index 0 is skipped; wrap to 1 and write the notify SN
		 * into the first sqn slot of the CQ doorbell page */
		if (!cq_index) {
			cq_index = 1;
			cq_db = (struct bnx2i_5771x_cq_db *)
					ep->qp.cq_pgtbl_virt;
			cq_db->sqn[0] = cq_index;
		}
	}
}
152
153
154/**
155 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
156 * @conn: iscsi connection on which RQ event occured
157 * @ptr: driver buffer to which RQ buffer contents is to
158 * be copied
159 * @len: length of valid data inside RQ buf
160 *
161 * Copies RQ buffer contents from shared (DMA'able) memory region to
162 * driver buffer. RQ is used to DMA unsolicitated iscsi pdu's and
163 * scsi sense info
164 */
165void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
166{
167 if (!bnx2i_conn->ep->qp.rqe_left)
168 return;
169
170 bnx2i_conn->ep->qp.rqe_left--;
171 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
172 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
173 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
174 bnx2i_conn->ep->qp.rq_cons_idx = 0;
175 } else {
176 bnx2i_conn->ep->qp.rq_cons_qe++;
177 bnx2i_conn->ep->qp.rq_cons_idx++;
178 }
179}
180
181
/* Build a 5771x doorbell message carrying the iSCSI connection type
 * and write it to the connection's mapped doorbell register. */
static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
{
	struct bnx2i_5771x_dbell dbell;
	u32 msg;

	memset(&dbell, 0, sizeof(dbell));
	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
	/* reinterpret the 4-byte struct as a u32 for the MMIO write;
	 * relies on the kernel being built with -fno-strict-aliasing */
	msg = *((u32 *)&dbell);
	/* TODO : get doorbell register mapping */
	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
}
194
195
/**
 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
 * @bnx2i_conn: iscsi connection on which event to post
 * @count: number of RQ buffer being posted to chip
 *
 * Advances the RQ producer index by @count and publishes it - via the
 * host-memory doorbell record for 57710 devices, via the chip RECV
 * doorbell register otherwise.
 */
void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *rq_db;
	/* bit 15 of rq_prod_idx looks like a phase/wrap marker that is
	 * toggled each time the index wraps - NOTE(review): confirm
	 * against firmware interface docs */
	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	ep->qp.rqe_left += count;
	/* do the index arithmetic on the low 15 bits only */
	ep->qp.rq_prod_idx &= 0x7FFF;
	ep->qp.rq_prod_idx += count;

	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
		/* wrapped past the queue end: reduce modulo queue depth
		 * and flip the phase bit */
		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
		if (!hi_bit)
			ep->qp.rq_prod_idx |= 0x8000;
	} else
		ep->qp.rq_prod_idx |= hi_bit;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
		rq_db->prod_idx = ep->qp.rq_prod_idx;
		/* no need to ring hardware doorbell for 57710 */
	} else {
		writew(ep->qp.rq_prod_idx,
		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
	}
	mmiowb();
}
230
231
/**
 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
 * @bnx2i_conn: iscsi connection to which new SQ entries belong
 * @count: number of SQ WQEs to post
 *
 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
 * of devices. For 5706/5708/5709 new SQ WQE count is written into the
 * doorbell register
 */
static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *sq_db;
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	ep->num_active_cmds++;
	wmb();	/* flush SQ WQE memory before the doorbell is rung */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		/* publish producer index in host memory, then ring the
		 * lightweight 5771x doorbell */
		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
		sq_db->prod_idx = ep->qp.sq_prod_idx;
		bnx2i_ring_577xx_doorbell(bnx2i_conn);
	} else
		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);

	mmiowb();	/* flush posted PCI writes */
}
257
258
/**
 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
 * @bnx2i_conn: iscsi connection to which new SQ entries belong
 * @count: number of SQ WQEs to post
 *
 * Advances the SQ producer entry pointer by @count (wrapping around the
 * end of the queue), bumps the producer index and rings the doorbell.
 */
static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
					      int count)
{
	int tmp_cnt;

	if (count == 1) {
		/* single-entry fast path: step or wrap to first entry */
		if (bnx2i_conn->ep->qp.sq_prod_qe ==
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe =
						bnx2i_conn->ep->qp.sq_first_qe;
		else
			bnx2i_conn->ep->qp.sq_prod_qe++;
	} else {
		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe += count;
		else {
			/* advance crosses the queue end: tmp_cnt entries
			 * fit before the wrap, the rest continue from
			 * sq_first_qe */
			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
				  bnx2i_conn->ep->qp.sq_prod_qe;
			bnx2i_conn->ep->qp.sq_prod_qe =
				&bnx2i_conn->ep->qp.sq_first_qe[count -
								(tmp_cnt + 1)];
		}
	}
	bnx2i_conn->ep->qp.sq_prod_idx += count;
	/* Ring the doorbell */
	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
}
294
295
/**
 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
 * @bnx2i_conn: iscsi connection
 * @task: iscsi task whose header/data describe the login request
 *
 * prepare and post an iSCSI Login request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
			   struct iscsi_task *task)
{
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_login_request *login_wqe;
	struct iscsi_login *login_hdr;
	u32 dword;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	login_hdr = (struct iscsi_login *)task->hdr;
	/* build the WQE in place at the current SQ producer slot */
	login_wqe = (struct bnx2i_login_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;

	login_wqe->op_code = login_hdr->opcode;
	login_wqe->op_attr = login_hdr->flags;
	login_wqe->version_max = login_hdr->max_version;
	login_wqe->version_min = login_hdr->min_version;
	login_wqe->data_length = ntoh24(login_hdr->dlength);
	/* 6-byte ISID split into low 4 bytes and high 2 bytes */
	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
	login_wqe->tsih = login_hdr->tsih;
	/* tag the itt as a middle-path (non-scsi-cmd) task */
	login_wqe->itt = task->itt |
		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
	login_wqe->cid = login_hdr->cid;

	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);

	/* response lands in the connection's generic-pdu response buffer */
	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
	login_wqe->resp_bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);

	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
		 (bnx2i_conn->gen_pdu.resp_buf_size <<
		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
	login_wqe->resp_buffer = dword;
	login_wqe->flags = 0;
	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
	login_wqe->bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
	login_wqe->num_bds = 1;
	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
350
351/**
352 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
353 * @conn: iscsi connection
354 * @mtask: driver command structure which is requesting
355 * a WQE to sent to chip for further processing
356 *
357 * prepare and post an iSCSI Login request WQE to CNIC firmware
358 */
359int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
360 struct iscsi_task *mtask)
361{
362 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
363 struct iscsi_tm *tmfabort_hdr;
364 struct scsi_cmnd *ref_sc;
365 struct iscsi_task *ctask;
366 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword;
369
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
372 tmfabort_wqe = (struct bnx2i_tmf_request *)
373 bnx2i_conn->ep->qp.sq_prod_qe;
374
375 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
376 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387 if (!ctask || ctask->sc)
388 /*
389 * the iscsi layer must have completed the cmd while this
390 * was starting up.
391 */
392 return 0;
393 ref_sc = ctask->sc;
394
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
403 tmfabort_wqe->bd_list_addr_hi = (u32)
404 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
405 tmfabort_wqe->num_bds = 1;
406 tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
407
408 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
409 return 0;
410}
411
412/**
413 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
414 * @conn: iscsi connection
415 * @cmd: driver command structure which is requesting
416 * a WQE to sent to chip for further processing
417 *
418 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
419 */
420int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
421 struct bnx2i_cmd *cmd)
422{
423 struct bnx2i_cmd_request *scsi_cmd_wqe;
424
425 scsi_cmd_wqe = (struct bnx2i_cmd_request *)
426 bnx2i_conn->ep->qp.sq_prod_qe;
427 memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
428 scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
429
430 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
431 return 0;
432}
433
434/**
435 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
436 * @conn: iscsi connection
437 * @cmd: driver command structure which is requesting
438 * a WQE to sent to chip for further processing
439 * @ttt: TTT to be used when building pdu header
440 * @datap: payload buffer pointer
441 * @data_len: payload data length
442 * @unsol: indicated whether nopout pdu is unsolicited pdu or
443 * in response to target's NOPIN w/ TTT != FFFFFFFF
444 *
445 * prepare and post a nopout request WQE to CNIC firmware
446 */
447int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
448 struct iscsi_task *task, u32 ttt,
449 char *datap, int data_len, int unsol)
450{
451 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
452 struct bnx2i_cmd *bnx2i_cmd;
453 struct bnx2i_nop_out_request *nopout_wqe;
454 struct iscsi_nopout *nopout_hdr;
455
456 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
457 nopout_hdr = (struct iscsi_nopout *)task->hdr;
458 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
459 nopout_wqe->op_code = nopout_hdr->opcode;
460 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
461 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
462
463 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
464 u32 tmp = nopout_hdr->lun[0];
465 /* 57710 requires LUN field to be swapped */
466 nopout_hdr->lun[0] = nopout_hdr->lun[1];
467 nopout_hdr->lun[1] = tmp;
468 }
469
470 nopout_wqe->itt = ((u16)task->itt |
471 (ISCSI_TASK_TYPE_MPATH <<
472 ISCSI_TMF_REQUEST_TYPE_SHIFT));
473 nopout_wqe->ttt = ttt;
474 nopout_wqe->flags = 0;
475 if (!unsol)
476 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
477 else if (nopout_hdr->itt == RESERVED_ITT)
478 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
479
480 nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
481 nopout_wqe->data_length = data_len;
482 if (data_len) {
483 /* handle payload data, not required in first release */
484 printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
485 } else {
486 nopout_wqe->bd_list_addr_lo = (u32)
487 bnx2i_conn->hba->mp_bd_dma;
488 nopout_wqe->bd_list_addr_hi =
489 (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
490 nopout_wqe->num_bds = 1;
491 }
492 nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
493
494 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
495 return 0;
496}
497
498
/**
 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
 * @bnx2i_conn: iscsi connection
 * @task: iscsi task whose header describes the logout request
 *
 * prepare and post logout request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task)
{
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_logout_request *logout_wqe;
	struct iscsi_logout *logout_hdr;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	logout_hdr = (struct iscsi_logout *)task->hdr;

	/* build the WQE in place at the current SQ producer slot */
	logout_wqe = (struct bnx2i_logout_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;
	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));

	logout_wqe->op_code = logout_hdr->opcode;
	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
	logout_wqe->op_attr =
			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
	/* tag the itt as a middle-path (non-scsi-cmd) task */
	logout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
	logout_wqe->data_length = 0;
	logout_wqe->cid = 0;

	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	logout_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	logout_wqe->num_bds = 1;
	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
540
541
542/**
543 * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware
544 * @conn: iscsi connection which requires iscsi parameter update
545 *
546 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
547 */
548void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
549{
550 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
551 struct bnx2i_hba *hba = bnx2i_conn->hba;
552 struct kwqe *kwqe_arr[2];
553 struct iscsi_kwqe_conn_update *update_wqe;
554 struct iscsi_kwqe_conn_update conn_update_kwqe;
555
556 update_wqe = &conn_update_kwqe;
557
558 update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
559 update_wqe->hdr.flags =
560 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
561
562 /* 5771x requires conn context id to be passed as is */
563 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
564 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
565 else
566 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
567 update_wqe->conn_flags = 0;
568 if (conn->hdrdgst_en)
569 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
570 if (conn->datadgst_en)
571 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
572 if (conn->session->initial_r2t_en)
573 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
574 if (conn->session->imm_data_en)
575 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
576
577 update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
578 update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
579 update_wqe->first_burst_length = conn->session->first_burst;
580 update_wqe->max_burst_length = conn->session->max_burst;
581 update_wqe->exp_stat_sn = conn->exp_statsn;
582 update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
583 update_wqe->session_error_recovery_level = conn->session->erl;
584 iscsi_conn_printk(KERN_ALERT, conn,
585 "bnx2i: conn update - MBL 0x%x FBL 0x%x"
586 "MRDSL_I 0x%x MRDSL_T 0x%x \n",
587 update_wqe->max_burst_length,
588 update_wqe->first_burst_length,
589 update_wqe->max_recv_pdu_length,
590 update_wqe->max_send_pdu_length);
591
592 kwqe_arr[0] = (struct kwqe *) update_wqe;
593 if (hba->cnic && hba->cnic->submit_kwqes)
594 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
595}
596
597
598/**
599 * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware
600 * @data: endpoint (transport handle) structure pointer
601 *
602 * routine to handle connection offload/destroy request timeout
603 */
604void bnx2i_ep_ofld_timer(unsigned long data)
605{
606 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
607
608 if (ep->state == EP_STATE_OFLD_START) {
609 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
610 ep->state = EP_STATE_OFLD_FAILED;
611 } else if (ep->state == EP_STATE_DISCONN_START) {
612 printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
613 ep->state = EP_STATE_DISCONN_TIMEDOUT;
614 } else if (ep->state == EP_STATE_CLEANUP_START) {
615 printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
616 ep->state = EP_STATE_CLEANUP_FAILED;
617 }
618
619 wake_up_interruptible(&ep->ofld_wait);
620}
621
622
/*
 * bnx2i_power_of2 - return log2(val) when 'val' is a power of two,
 * 0 when it is not. Callers pass hardware queue sizes that are known
 * non-zero powers of two.
 */
static int bnx2i_power_of2(u32 val)
{
	u32 exponent;

	/* more than one bit set => not a power of two */
	if (val & (val - 1))
		return 0;

	/* count the bit positions below the (single) set bit */
	for (exponent = 0, val--; val; val >>= 1)
		exponent++;

	return exponent;
}
635
636
637/**
638 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
639 * @hba: adapter structure pointer
640 * @cmd: driver command structure which is requesting
641 * a WQE to sent to chip for further processing
642 *
643 * prepares and posts CONN_OFLD_REQ1/2 KWQE
644 */
645void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
646{
647 struct bnx2i_cleanup_request *cmd_cleanup;
648
649 cmd_cleanup =
650 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
651 memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
652
653 cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
654 cmd_cleanup->itt = cmd->req.itt;
655 cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
656
657 bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
658}
659
660
661/**
662 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
663 * @hba: adapter structure pointer
664 * @ep: endpoint (transport indentifier) structure
665 *
666 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
667 * iscsi connection context clean-up process
668 */
669void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
670{
671 struct kwqe *kwqe_arr[2];
672 struct iscsi_kwqe_conn_destroy conn_cleanup;
673
674 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
675
676 conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
677 conn_cleanup.hdr.flags =
678 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
679 /* 5771x requires conn context id to be passed as is */
680 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
681 conn_cleanup.context_id = ep->ep_cid;
682 else
683 conn_cleanup.context_id = (ep->ep_cid >> 7);
684
685 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
686
687 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
688 if (hba->cnic && hba->cnic->submit_kwqes)
689 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
690}
691
692
693/**
694 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
695 * @hba: adapter structure pointer
696 * @ep: endpoint (transport indentifier) structure
697 *
698 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
699 */
700static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
701 struct bnx2i_endpoint *ep)
702{
703 struct kwqe *kwqe_arr[2];
704 struct iscsi_kwqe_conn_offload1 ofld_req1;
705 struct iscsi_kwqe_conn_offload2 ofld_req2;
706 dma_addr_t dma_addr;
707 int num_kwqes = 2;
708 u32 *ptbl;
709
710 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
711 ofld_req1.hdr.flags =
712 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
713
714 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
715
716 dma_addr = ep->qp.sq_pgtbl_phys;
717 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
718 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
719
720 dma_addr = ep->qp.cq_pgtbl_phys;
721 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
722 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
723
724 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
725 ofld_req2.hdr.flags =
726 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
727
728 dma_addr = ep->qp.rq_pgtbl_phys;
729 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
730 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
731
732 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
733
734 ofld_req2.sq_first_pte.hi = *ptbl++;
735 ofld_req2.sq_first_pte.lo = *ptbl;
736
737 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
738 ofld_req2.cq_first_pte.hi = *ptbl++;
739 ofld_req2.cq_first_pte.lo = *ptbl;
740
741 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
742 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
743 ofld_req2.num_additional_wqes = 0;
744
745 if (hba->cnic && hba->cnic->submit_kwqes)
746 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
747}
748
749
750/**
751 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
752 * @hba: adapter structure pointer
753 * @ep: endpoint (transport indentifier) structure
754 *
755 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
756 */
757static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
758 struct bnx2i_endpoint *ep)
759{
760 struct kwqe *kwqe_arr[5];
761 struct iscsi_kwqe_conn_offload1 ofld_req1;
762 struct iscsi_kwqe_conn_offload2 ofld_req2;
763 struct iscsi_kwqe_conn_offload3 ofld_req3[1];
764 dma_addr_t dma_addr;
765 int num_kwqes = 2;
766 u32 *ptbl;
767
768 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
769 ofld_req1.hdr.flags =
770 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
771
772 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
773
774 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
775 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
776 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
777
778 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
779 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
780 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
781
782 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
783 ofld_req2.hdr.flags =
784 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
785
786 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
787 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
788 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
789
790 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
791 ofld_req2.sq_first_pte.hi = *ptbl++;
792 ofld_req2.sq_first_pte.lo = *ptbl;
793
794 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
795 ofld_req2.cq_first_pte.hi = *ptbl++;
796 ofld_req2.cq_first_pte.lo = *ptbl;
797
798 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
799 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
800
801 ofld_req2.num_additional_wqes = 1;
802 memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
803 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
804 ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
805 ofld_req3[0].qp_first_pte[0].lo = *ptbl;
806
807 kwqe_arr[2] = (struct kwqe *) ofld_req3;
808 /* need if we decide to go with multiple KCQE's per conn */
809 num_kwqes += 1;
810
811 if (hba->cnic && hba->cnic->submit_kwqes)
812 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
813}
814
815/**
816 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
817 *
818 * @hba: adapter structure pointer
819 * @ep: endpoint (transport indentifier) structure
820 *
821 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
822 */
823void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
824{
825 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
826 bnx2i_5771x_send_conn_ofld_req(hba, ep);
827 else
828 bnx2i_570x_send_conn_ofld_req(hba, ep);
829}
830
831
832/**
833 * setup_qp_page_tables - iscsi QP page table setup function
834 * @ep: endpoint (transport indentifier) structure
835 *
836 * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires
837 * 64-bit address in big endian format. Whereas 10G/sec (57710) requires
838 * PT in little endian format
839 */
840static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
841{
842 int num_pages;
843 u32 *ptbl;
844 dma_addr_t page;
845 int cnic_dev_10g;
846
847 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
848 cnic_dev_10g = 1;
849 else
850 cnic_dev_10g = 0;
851
852 /* SQ page table */
853 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
854 num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
855 page = ep->qp.sq_phys;
856
857 if (cnic_dev_10g)
858 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
859 else
860 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
861 while (num_pages--) {
862 if (cnic_dev_10g) {
863 /* PTE is written in little endian format for 57710 */
864 *ptbl = (u32) page;
865 ptbl++;
866 *ptbl = (u32) ((u64) page >> 32);
867 ptbl++;
868 page += PAGE_SIZE;
869 } else {
870 /* PTE is written in big endian format for
871 * 5706/5708/5709 devices */
872 *ptbl = (u32) ((u64) page >> 32);
873 ptbl++;
874 *ptbl = (u32) page;
875 ptbl++;
876 page += PAGE_SIZE;
877 }
878 }
879
880 /* RQ page table */
881 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
882 num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
883 page = ep->qp.rq_phys;
884
885 if (cnic_dev_10g)
886 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
887 else
888 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
889 while (num_pages--) {
890 if (cnic_dev_10g) {
891 /* PTE is written in little endian format for 57710 */
892 *ptbl = (u32) page;
893 ptbl++;
894 *ptbl = (u32) ((u64) page >> 32);
895 ptbl++;
896 page += PAGE_SIZE;
897 } else {
898 /* PTE is written in big endian format for
899 * 5706/5708/5709 devices */
900 *ptbl = (u32) ((u64) page >> 32);
901 ptbl++;
902 *ptbl = (u32) page;
903 ptbl++;
904 page += PAGE_SIZE;
905 }
906 }
907
908 /* CQ page table */
909 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
910 num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
911 page = ep->qp.cq_phys;
912
913 if (cnic_dev_10g)
914 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
915 else
916 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
917 while (num_pages--) {
918 if (cnic_dev_10g) {
919 /* PTE is written in little endian format for 57710 */
920 *ptbl = (u32) page;
921 ptbl++;
922 *ptbl = (u32) ((u64) page >> 32);
923 ptbl++;
924 page += PAGE_SIZE;
925 } else {
926 /* PTE is written in big endian format for
927 * 5706/5708/5709 devices */
928 *ptbl = (u32) ((u64) page >> 32);
929 ptbl++;
930 *ptbl = (u32) page;
931 ptbl++;
932 page += PAGE_SIZE;
933 }
934 }
935}
936
937
938/**
939 * bnx2i_alloc_qp_resc - allocates required resources for QP.
940 * @hba: adapter structure pointer
941 * @ep: endpoint (transport indentifier) structure
942 *
943 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
944 * memory for SQ/RQ/CQ and page tables. EP structure elements such
945 * as producer/consumer indexes/pointers, queue sizes and page table
946 * contents are setup
947 */
948int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
949{
950 struct bnx2i_5771x_cq_db *cq_db;
951
952 ep->hba = hba;
953 ep->conn = NULL;
954 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
955
956 /* Allocate page table memory for SQ which is page aligned */
957 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
958 ep->qp.sq_mem_size =
959 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
960 ep->qp.sq_pgtbl_size =
961 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
962 ep->qp.sq_pgtbl_size =
963 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
964
965 ep->qp.sq_pgtbl_virt =
966 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
967 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
968 if (!ep->qp.sq_pgtbl_virt) {
969 printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
970 ep->qp.sq_pgtbl_size);
971 goto mem_alloc_err;
972 }
973
974 /* Allocate memory area for actual SQ element */
975 ep->qp.sq_virt =
976 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
977 &ep->qp.sq_phys, GFP_KERNEL);
978 if (!ep->qp.sq_virt) {
979 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
980 ep->qp.sq_mem_size);
981 goto mem_alloc_err;
982 }
983
984 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
985 ep->qp.sq_first_qe = ep->qp.sq_virt;
986 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
987 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
988 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
989 ep->qp.sq_prod_idx = 0;
990 ep->qp.sq_cons_idx = 0;
991 ep->qp.sqe_left = hba->max_sqes;
992
993 /* Allocate page table memory for CQ which is page aligned */
994 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
995 ep->qp.cq_mem_size =
996 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
997 ep->qp.cq_pgtbl_size =
998 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
999 ep->qp.cq_pgtbl_size =
1000 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1001
1002 ep->qp.cq_pgtbl_virt =
1003 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1004 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1005 if (!ep->qp.cq_pgtbl_virt) {
1006 printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
1007 ep->qp.cq_pgtbl_size);
1008 goto mem_alloc_err;
1009 }
1010
1011 /* Allocate memory area for actual CQ element */
1012 ep->qp.cq_virt =
1013 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1014 &ep->qp.cq_phys, GFP_KERNEL);
1015 if (!ep->qp.cq_virt) {
1016 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1017 ep->qp.cq_mem_size);
1018 goto mem_alloc_err;
1019 }
1020 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1021
1022 ep->qp.cq_first_qe = ep->qp.cq_virt;
1023 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1024 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1025 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1026 ep->qp.cq_prod_idx = 0;
1027 ep->qp.cq_cons_idx = 0;
1028 ep->qp.cqe_left = hba->max_cqes;
1029 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1030 ep->qp.cqe_size = hba->max_cqes;
1031
1032 /* Invalidate all EQ CQE index, req only for 57710 */
1033 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1034 memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
1035
1036 /* Allocate page table memory for RQ which is page aligned */
1037 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1038 ep->qp.rq_mem_size =
1039 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1040 ep->qp.rq_pgtbl_size =
1041 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
1042 ep->qp.rq_pgtbl_size =
1043 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1044
1045 ep->qp.rq_pgtbl_virt =
1046 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1047 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1048 if (!ep->qp.rq_pgtbl_virt) {
1049 printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
1050 ep->qp.rq_pgtbl_size);
1051 goto mem_alloc_err;
1052 }
1053
1054 /* Allocate memory area for actual RQ element */
1055 ep->qp.rq_virt =
1056 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1057 &ep->qp.rq_phys, GFP_KERNEL);
1058 if (!ep->qp.rq_virt) {
1059 printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
1060 ep->qp.rq_mem_size);
1061 goto mem_alloc_err;
1062 }
1063
1064 ep->qp.rq_first_qe = ep->qp.rq_virt;
1065 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1066 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1067 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1068 ep->qp.rq_prod_idx = 0x8000;
1069 ep->qp.rq_cons_idx = 0;
1070 ep->qp.rqe_left = hba->max_rqes;
1071
1072 setup_qp_page_tables(ep);
1073
1074 return 0;
1075
1076mem_alloc_err:
1077 bnx2i_free_qp_resc(hba, ep);
1078 return -ENOMEM;
1079}
1080
1081
1082
1083/**
1084 * bnx2i_free_qp_resc - free memory resources held by QP
1085 * @hba: adapter structure pointer
1086 * @ep: endpoint (transport indentifier) structure
1087 *
1088 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 */
1090void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1091{
1092 if (ep->qp.ctx_base) {
1093 iounmap(ep->qp.ctx_base);
1094 ep->qp.ctx_base = NULL;
1095 }
1096 /* Free SQ mem */
1097 if (ep->qp.sq_pgtbl_virt) {
1098 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1099 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1100 ep->qp.sq_pgtbl_virt = NULL;
1101 ep->qp.sq_pgtbl_phys = 0;
1102 }
1103 if (ep->qp.sq_virt) {
1104 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1105 ep->qp.sq_virt, ep->qp.sq_phys);
1106 ep->qp.sq_virt = NULL;
1107 ep->qp.sq_phys = 0;
1108 }
1109
1110 /* Free RQ mem */
1111 if (ep->qp.rq_pgtbl_virt) {
1112 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1113 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1114 ep->qp.rq_pgtbl_virt = NULL;
1115 ep->qp.rq_pgtbl_phys = 0;
1116 }
1117 if (ep->qp.rq_virt) {
1118 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1119 ep->qp.rq_virt, ep->qp.rq_phys);
1120 ep->qp.rq_virt = NULL;
1121 ep->qp.rq_phys = 0;
1122 }
1123
1124 /* Free CQ mem */
1125 if (ep->qp.cq_pgtbl_virt) {
1126 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1127 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1128 ep->qp.cq_pgtbl_virt = NULL;
1129 ep->qp.cq_pgtbl_phys = 0;
1130 }
1131 if (ep->qp.cq_virt) {
1132 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1133 ep->qp.cq_virt, ep->qp.cq_phys);
1134 ep->qp.cq_virt = NULL;
1135 ep->qp.cq_phys = 0;
1136 }
1137}
1138
1139
1140/**
1141 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
1142 * @hba: adapter structure pointer
1143 *
1144 * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w
1145 * This results in iSCSi support validation and on-chip context manager
1146 * initialization. Firmware completes this handshake with a CQE carrying
1147 * the result of iscsi support validation. Parameter carried by
1148 * iscsi init request determines the number of offloaded connection and
1149 * tolerance level for iscsi protocol violation this hba/chip can support
1150 */
1151int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1152{
1153 struct kwqe *kwqe_arr[3];
1154 struct iscsi_kwqe_init1 iscsi_init;
1155 struct iscsi_kwqe_init2 iscsi_init2;
1156 int rc = 0;
1157 u64 mask64;
1158
1159 bnx2i_adjust_qp_size(hba);
1160
1161 iscsi_init.flags =
1162 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1163 if (en_tcp_dack)
1164 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1165 iscsi_init.reserved0 = 0;
1166 iscsi_init.num_cqs = 1;
1167 iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
1168 iscsi_init.hdr.flags =
1169 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1170
1171 iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
1172 iscsi_init.dummy_buffer_addr_hi =
1173 (u32) ((u64) hba->dummy_buf_dma >> 32);
1174
1175 hba->ctx_ccell_tasks =
1176 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1177 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1178 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1179 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1180 iscsi_init.sq_num_wqes = hba->max_sqes;
1181 iscsi_init.cq_log_wqes_per_page =
1182 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
1183 iscsi_init.cq_num_wqes = hba->max_cqes;
1184 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1185 (PAGE_SIZE - 1)) / PAGE_SIZE;
1186 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1187 (PAGE_SIZE - 1)) / PAGE_SIZE;
1188 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1189 iscsi_init.rq_num_wqes = hba->max_rqes;
1190
1191
1192 iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
1193 iscsi_init2.hdr.flags =
1194 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1195 iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
1196 mask64 = 0x0ULL;
1197 mask64 |= (
1198 /* CISCO MDS */
1199 (1UL <<
1200 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
1201 /* HP MSA1510i */
1202 (1UL <<
1203 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1204 /* EMC */
1205 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
1206 if (error_mask1)
1207 iscsi_init2.error_bit_map[0] = error_mask1;
1208 else
1209 iscsi_init2.error_bit_map[0] = (u32) mask64;
1210
1211 if (error_mask2)
1212 iscsi_init2.error_bit_map[1] = error_mask2;
1213 else
1214 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1215
1216 iscsi_error_mask = mask64;
1217
1218 kwqe_arr[0] = (struct kwqe *) &iscsi_init;
1219 kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
1220
1221 if (hba->cnic && hba->cnic->submit_kwqes)
1222 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
1223 return rc;
1224}
1225
1226
1227/**
1228 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
1229 * @conn: iscsi connection
1230 * @cqe: pointer to newly DMA'ed CQE entry for processing
1231 *
1232 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1233 */
1234static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1235 struct bnx2i_conn *bnx2i_conn,
1236 struct cqe *cqe)
1237{
1238 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1239 struct bnx2i_cmd_response *resp_cqe;
1240 struct bnx2i_cmd *bnx2i_cmd;
1241 struct iscsi_task *task;
1242 struct iscsi_cmd_rsp *hdr;
1243 u32 datalen = 0;
1244
1245 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1246 spin_lock(&session->lock);
1247 task = iscsi_itt_to_task(conn,
1248 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1249 if (!task)
1250 goto fail;
1251
1252 bnx2i_cmd = task->dd_data;
1253
1254 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1255 conn->datain_pdus_cnt +=
1256 resp_cqe->task_stat.read_stat.num_data_outs;
1257 conn->rxdata_octets +=
1258 bnx2i_cmd->req.total_data_transfer_length;
1259 } else {
1260 conn->dataout_pdus_cnt +=
1261 resp_cqe->task_stat.read_stat.num_data_outs;
1262 conn->r2t_pdus_cnt +=
1263 resp_cqe->task_stat.read_stat.num_r2ts;
1264 conn->txdata_octets +=
1265 bnx2i_cmd->req.total_data_transfer_length;
1266 }
1267 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1268
1269 hdr = (struct iscsi_cmd_rsp *)task->hdr;
1270 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1271 hdr->opcode = resp_cqe->op_code;
1272 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
1273 hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
1274 hdr->response = resp_cqe->response;
1275 hdr->cmd_status = resp_cqe->status;
1276 hdr->flags = resp_cqe->response_flags;
1277 hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
1278
1279 if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
1280 goto done;
1281
1282 if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
1283 datalen = resp_cqe->data_length;
1284 if (datalen < 2)
1285 goto done;
1286
1287 if (datalen > BNX2I_RQ_WQE_SIZE) {
1288 iscsi_conn_printk(KERN_ERR, conn,
1289 "sense data len %d > RQ sz\n",
1290 datalen);
1291 datalen = BNX2I_RQ_WQE_SIZE;
1292 } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
1293 iscsi_conn_printk(KERN_ERR, conn,
1294 "sense data len %d > conn data\n",
1295 datalen);
1296 datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
1297 }
1298
1299 bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
1300 bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
1301 }
1302
1303done:
1304 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1305 conn->data, datalen);
1306fail:
1307 spin_unlock(&session->lock);
1308 return 0;
1309}
1310
1311
1312/**
1313 * bnx2i_process_login_resp - this function handles iscsi login response
1314 * @session: iscsi session pointer
1315 * @bnx2i_conn: iscsi connection pointer
1316 * @cqe: pointer to newly DMA'ed CQE entry for processing
1317 *
1318 * process Login Response CQE & complete it to open-iscsi user daemon
1319 */
1320static int bnx2i_process_login_resp(struct iscsi_session *session,
1321 struct bnx2i_conn *bnx2i_conn,
1322 struct cqe *cqe)
1323{
1324 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1325 struct iscsi_task *task;
1326 struct bnx2i_login_response *login;
1327 struct iscsi_login_rsp *resp_hdr;
1328 int pld_len;
1329 int pad_len;
1330
1331 login = (struct bnx2i_login_response *) cqe;
1332 spin_lock(&session->lock);
1333 task = iscsi_itt_to_task(conn,
1334 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1335 if (!task)
1336 goto done;
1337
1338 resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1339 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1340 resp_hdr->opcode = login->op_code;
1341 resp_hdr->flags = login->response_flags;
1342 resp_hdr->max_version = login->version_max;
1343 resp_hdr->active_version = login->version_active;;
1344 resp_hdr->hlength = 0;
1345
1346 hton24(resp_hdr->dlength, login->data_length);
1347 memcpy(resp_hdr->isid, &login->isid_lo, 6);
1348 resp_hdr->tsih = cpu_to_be16(login->tsih);
1349 resp_hdr->itt = task->hdr->itt;
1350 resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1351 resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1352 resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1353 resp_hdr->status_class = login->status_class;
1354 resp_hdr->status_detail = login->status_detail;
1355 pld_len = login->data_length;
1356 bnx2i_conn->gen_pdu.resp_wr_ptr =
1357 bnx2i_conn->gen_pdu.resp_buf + pld_len;
1358
1359 pad_len = 0;
1360 if (pld_len & 0x3)
1361 pad_len = 4 - (pld_len % 4);
1362
1363 if (pad_len) {
1364 int i = 0;
1365 for (i = 0; i < pad_len; i++) {
1366 bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1367 bnx2i_conn->gen_pdu.resp_wr_ptr++;
1368 }
1369 }
1370
1371 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1372 bnx2i_conn->gen_pdu.resp_buf,
1373 bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1374done:
1375 spin_unlock(&session->lock);
1376 return 0;
1377}
1378
1379/**
1380 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1381 * @session: iscsi session pointer
1382 * @bnx2i_conn: iscsi connection pointer
1383 * @cqe: pointer to newly DMA'ed CQE entry for processing
1384 *
1385 * process iSCSI TMF Response CQE and wake up the driver eh thread.
1386 */
1387static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1388 struct bnx2i_conn *bnx2i_conn,
1389 struct cqe *cqe)
1390{
1391 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1392 struct iscsi_task *task;
1393 struct bnx2i_tmf_response *tmf_cqe;
1394 struct iscsi_tm_rsp *resp_hdr;
1395
1396 tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1397 spin_lock(&session->lock);
1398 task = iscsi_itt_to_task(conn,
1399 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1400 if (!task)
1401 goto done;
1402
1403 resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1404 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1405 resp_hdr->opcode = tmf_cqe->op_code;
1406 resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1407 resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1408 resp_hdr->itt = task->hdr->itt;
1409 resp_hdr->response = tmf_cqe->response;
1410
1411 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1412done:
1413 spin_unlock(&session->lock);
1414 return 0;
1415}
1416
1417/**
1418 * bnx2i_process_logout_resp - this function handles iscsi logout response
1419 * @session: iscsi session pointer
1420 * @bnx2i_conn: iscsi connection pointer
1421 * @cqe: pointer to newly DMA'ed CQE entry for processing
1422 *
1423 * process iSCSI Logout Response CQE & make function call to
1424 * notify the user daemon.
1425 */
1426static int bnx2i_process_logout_resp(struct iscsi_session *session,
1427 struct bnx2i_conn *bnx2i_conn,
1428 struct cqe *cqe)
1429{
1430 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1431 struct iscsi_task *task;
1432 struct bnx2i_logout_response *logout;
1433 struct iscsi_logout_rsp *resp_hdr;
1434
1435 logout = (struct bnx2i_logout_response *) cqe;
1436 spin_lock(&session->lock);
1437 task = iscsi_itt_to_task(conn,
1438 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1439 if (!task)
1440 goto done;
1441
1442 resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1443 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1444 resp_hdr->opcode = logout->op_code;
1445 resp_hdr->flags = logout->response;
1446 resp_hdr->hlength = 0;
1447
1448 resp_hdr->itt = task->hdr->itt;
1449 resp_hdr->statsn = task->hdr->exp_statsn;
1450 resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1451 resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1452
1453 resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1454 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1455
1456 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1457done:
1458 spin_unlock(&session->lock);
1459 return 0;
1460}
1461
1462/**
1463 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1464 * @session: iscsi session pointer
1465 * @bnx2i_conn: iscsi connection pointer
1466 * @cqe: pointer to newly DMA'ed CQE entry for processing
1467 *
1468 * process iSCSI NOPIN local completion CQE, frees IIT and command structures
1469 */
1470static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1471 struct bnx2i_conn *bnx2i_conn,
1472 struct cqe *cqe)
1473{
1474 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1475 struct bnx2i_nop_in_msg *nop_in;
1476 struct iscsi_task *task;
1477
1478 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1479 spin_lock(&session->lock);
1480 task = iscsi_itt_to_task(conn,
1481 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1482 if (task)
1483 iscsi_put_task(task);
1484 spin_unlock(&session->lock);
1485}
1486
1487/**
1488 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
1489 * @conn: iscsi connection
1490 *
1491 * Firmware advances RQ producer index for every unsolicited PDU even if
1492 * payload data length is '0'. This function makes corresponding
1493 * adjustments on the driver side to match this f/w behavior
1494 */
1495static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
1496{
1497 char dummy_rq_data[2];
1498 bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
1499 bnx2i_put_rq_buf(bnx2i_conn, 1);
1500}
1501
1502
1503/**
1504 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
1505 * @session: iscsi session pointer
1506 * @bnx2i_conn: iscsi connection pointer
1507 * @cqe: pointer to newly DMA'ed CQE entry for processing
1508 *
1509 * process iSCSI target's proactive iSCSI NOPIN request
1510 */
1511static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1512 struct bnx2i_conn *bnx2i_conn,
1513 struct cqe *cqe)
1514{
1515 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1516 struct iscsi_task *task;
1517 struct bnx2i_nop_in_msg *nop_in;
1518 struct iscsi_nopin *hdr;
1519 u32 itt;
1520 int tgt_async_nop = 0;
1521
1522 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1523 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1524
1525 spin_lock(&session->lock);
1526 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
1527 memset(hdr, 0, sizeof(struct iscsi_hdr));
1528 hdr->opcode = nop_in->op_code;
1529 hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
1530 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1531 hdr->ttt = cpu_to_be32(nop_in->ttt);
1532
1533 if (itt == (u16) RESERVED_ITT) {
1534 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1535 hdr->itt = RESERVED_ITT;
1536 tgt_async_nop = 1;
1537 goto done;
1538 }
1539
1540 /* this is a response to one of our nop-outs */
1541 task = iscsi_itt_to_task(conn, itt);
1542 if (task) {
1543 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 hdr->itt = task->hdr->itt;
1545 hdr->ttt = cpu_to_be32(nop_in->ttt);
1546 memcpy(hdr->lun, nop_in->lun, 8);
1547 }
1548done:
1549 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1550 spin_unlock(&session->lock);
1551
1552 return tgt_async_nop;
1553}
1554
1555
1556/**
1557 * bnx2i_process_async_mesg - this function handles iscsi async message
1558 * @session: iscsi session pointer
1559 * @bnx2i_conn: iscsi connection pointer
1560 * @cqe: pointer to newly DMA'ed CQE entry for processing
1561 *
1562 * process iSCSI ASYNC Message
1563 */
1564static void bnx2i_process_async_mesg(struct iscsi_session *session,
1565 struct bnx2i_conn *bnx2i_conn,
1566 struct cqe *cqe)
1567{
1568 struct bnx2i_async_msg *async_cqe;
1569 struct iscsi_async *resp_hdr;
1570 u8 async_event;
1571
1572 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1573
1574 async_cqe = (struct bnx2i_async_msg *)cqe;
1575 async_event = async_cqe->async_event;
1576
1577 if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
1578 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1579 "async: scsi events not supported\n");
1580 return;
1581 }
1582
1583 spin_lock(&session->lock);
1584 resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
1585 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1586 resp_hdr->opcode = async_cqe->op_code;
1587 resp_hdr->flags = 0x80;
1588
1589 memcpy(resp_hdr->lun, async_cqe->lun, 8);
1590 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1591 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1592
1593 resp_hdr->async_event = async_cqe->async_event;
1594 resp_hdr->async_vcode = async_cqe->async_vcode;
1595
1596 resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
1597 resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
1598 resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
1599
1600 __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
1601 (struct iscsi_hdr *)resp_hdr, NULL, 0);
1602 spin_unlock(&session->lock);
1603}
1604
1605
1606/**
1607 * bnx2i_process_reject_mesg - process iscsi reject pdu
1608 * @session: iscsi session pointer
1609 * @bnx2i_conn: iscsi connection pointer
1610 * @cqe: pointer to newly DMA'ed CQE entry for processing
1611 *
1612 * process iSCSI REJECT message
1613 */
1614static void bnx2i_process_reject_mesg(struct iscsi_session *session,
1615 struct bnx2i_conn *bnx2i_conn,
1616 struct cqe *cqe)
1617{
1618 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1619 struct bnx2i_reject_msg *reject;
1620 struct iscsi_reject *hdr;
1621
1622 reject = (struct bnx2i_reject_msg *) cqe;
1623 if (reject->data_length) {
1624 bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
1625 bnx2i_put_rq_buf(bnx2i_conn, 1);
1626 } else
1627 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1628
1629 spin_lock(&session->lock);
1630 hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
1631 memset(hdr, 0, sizeof(struct iscsi_hdr));
1632 hdr->opcode = reject->op_code;
1633 hdr->reason = reject->reason;
1634 hton24(hdr->dlength, reject->data_length);
1635 hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
1636 hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
1637 hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
1638 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
1639 reject->data_length);
1640 spin_unlock(&session->lock);
1641}
1642
1643/**
1644 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
1645 * @session: iscsi session pointer
1646 * @bnx2i_conn: iscsi connection pointer
1647 * @cqe: pointer to newly DMA'ed CQE entry for processing
1648 *
1649 * process command cleanup response CQE during conn shutdown or error recovery
1650 */
1651static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1652 struct bnx2i_conn *bnx2i_conn,
1653 struct cqe *cqe)
1654{
1655 struct bnx2i_cleanup_response *cmd_clean_rsp;
1656 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1657 struct iscsi_task *task;
1658
1659 cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
1660 spin_lock(&session->lock);
1661 task = iscsi_itt_to_task(conn,
1662 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1663 if (!task)
1664 printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
1665 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1666 spin_unlock(&session->lock);
1667 complete(&bnx2i_conn->cmd_cleanup_cmpl);
1668}
1669
1670
1671
1672/**
1673 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
1674 * @bnx2i_conn: iscsi connection
1675 *
1676 * this function is called by generic KCQ handler to process all pending CQE's
1677 */
1678static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1679{
1680 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1681 struct iscsi_session *session = conn->session;
1682 struct qp_info *qp = &bnx2i_conn->ep->qp;
1683 struct bnx2i_nop_in_msg *nopin;
1684 int tgt_async_msg;
1685
1686 while (1) {
1687 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1688 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1689 break;
1690
1691 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
1692 break;
1693
1694 tgt_async_msg = 0;
1695
1696 switch (nopin->op_code) {
1697 case ISCSI_OP_SCSI_CMD_RSP:
1698 case ISCSI_OP_SCSI_DATA_IN:
1699 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
1700 qp->cq_cons_qe);
1701 break;
1702 case ISCSI_OP_LOGIN_RSP:
1703 bnx2i_process_login_resp(session, bnx2i_conn,
1704 qp->cq_cons_qe);
1705 break;
1706 case ISCSI_OP_SCSI_TMFUNC_RSP:
1707 bnx2i_process_tmf_resp(session, bnx2i_conn,
1708 qp->cq_cons_qe);
1709 break;
1710 case ISCSI_OP_LOGOUT_RSP:
1711 bnx2i_process_logout_resp(session, bnx2i_conn,
1712 qp->cq_cons_qe);
1713 break;
1714 case ISCSI_OP_NOOP_IN:
1715 if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
1716 qp->cq_cons_qe))
1717 tgt_async_msg = 1;
1718 break;
1719 case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
1720 bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
1721 qp->cq_cons_qe);
1722 break;
1723 case ISCSI_OP_ASYNC_EVENT:
1724 bnx2i_process_async_mesg(session, bnx2i_conn,
1725 qp->cq_cons_qe);
1726 tgt_async_msg = 1;
1727 break;
1728 case ISCSI_OP_REJECT:
1729 bnx2i_process_reject_mesg(session, bnx2i_conn,
1730 qp->cq_cons_qe);
1731 break;
1732 case ISCSI_OPCODE_CLEANUP_RESPONSE:
1733 bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
1734 qp->cq_cons_qe);
1735 break;
1736 default:
1737 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1738 nopin->op_code);
1739 }
1740
1741 if (!tgt_async_msg)
1742 bnx2i_conn->ep->num_active_cmds--;
1743
1744 /* clear out in production version only, till beta keep opcode
1745 * field intact, will be helpful in debugging (context dump)
1746 * nopin->op_code = 0;
1747 */
1748 qp->cqe_exp_seq_sn++;
1749 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1750 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1751
1752 if (qp->cq_cons_qe == qp->cq_last_qe) {
1753 qp->cq_cons_qe = qp->cq_first_qe;
1754 qp->cq_cons_idx = 0;
1755 } else {
1756 qp->cq_cons_qe++;
1757 qp->cq_cons_idx++;
1758 }
1759 }
1760 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1761}
1762
1763/**
1764 * bnx2i_fastpath_notification - process global event queue (KCQ)
1765 * @hba: adapter structure pointer
1766 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
1767 *
1768 * Fast path event notification handler, KCQ entry carries context id
1769 * of the connection that has 1 or more pending CQ entries
1770 */
1771static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1772 struct iscsi_kcqe *new_cqe_kcqe)
1773{
1774 struct bnx2i_conn *conn;
1775 u32 iscsi_cid;
1776
1777 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1778 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1779
1780 if (!conn) {
1781 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1782 return;
1783 }
1784 if (!conn->ep) {
1785 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1786 return;
1787 }
1788
1789 bnx2i_process_new_cqes(conn);
1790}
1791
1792
1793/**
1794 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
1795 * @hba: adapter structure pointer
1796 * @update_kcqe: kcqe pointer
1797 *
1798 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
1799 */
1800static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
1801 struct iscsi_kcqe *update_kcqe)
1802{
1803 struct bnx2i_conn *conn;
1804 u32 iscsi_cid;
1805
1806 iscsi_cid = update_kcqe->iscsi_conn_id;
1807 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1808
1809 if (!conn) {
1810 printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
1811 return;
1812 }
1813 if (!conn->ep) {
1814 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
1815 return;
1816 }
1817
1818 if (update_kcqe->completion_status) {
1819 printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
1820 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
1821 } else
1822 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
1823
1824 wake_up_interruptible(&conn->ep->ofld_wait);
1825}
1826
1827
1828/**
1829 * bnx2i_recovery_que_add_conn - add connection to recovery queue
1830 * @hba: adapter structure pointer
1831 * @bnx2i_conn: iscsi connection
1832 *
1833 * Add connection to recovery queue and schedule adapter eh worker
1834 */
1835static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
1836 struct bnx2i_conn *bnx2i_conn)
1837{
1838 iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
1839 ISCSI_ERR_CONN_FAILED);
1840}
1841
1842
1843/**
1844 * bnx2i_process_tcp_error - process error notification on a given connection
1845 *
1846 * @hba: adapter structure pointer
1847 * @tcp_err: tcp error kcqe pointer
1848 *
1849 * handles tcp level error notifications from FW.
1850 */
1851static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
1852 struct iscsi_kcqe *tcp_err)
1853{
1854 struct bnx2i_conn *bnx2i_conn;
1855 u32 iscsi_cid;
1856
1857 iscsi_cid = tcp_err->iscsi_conn_id;
1858 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1859
1860 if (!bnx2i_conn) {
1861 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1862 return;
1863 }
1864
1865 printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
1866 iscsi_cid, tcp_err->completion_status);
1867 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
1868}
1869
1870
1871/**
1872 * bnx2i_process_iscsi_error - process error notification on a given connection
1873 * @hba: adapter structure pointer
1874 * @iscsi_err: iscsi error kcqe pointer
1875 *
1876 * handles iscsi error notifications from the FW. Firmware based in initial
1877 * handshake classifies iscsi protocol / TCP rfc violation into either
1878 * warning or error indications. If indication is of "Error" type, driver
1879 * will initiate session recovery for that connection/session. For
1880 * "Warning" type indication, driver will put out a system log message
1881 * (there will be only one message for each type for the life of the
1882 * session, this is to avoid un-necessarily overloading the system)
1883 */
1884static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
1885 struct iscsi_kcqe *iscsi_err)
1886{
1887 struct bnx2i_conn *bnx2i_conn;
1888 u32 iscsi_cid;
1889 char warn_notice[] = "iscsi_warning";
1890 char error_notice[] = "iscsi_error";
1891 char additional_notice[64];
1892 char *message;
1893 int need_recovery;
1894 u64 err_mask64;
1895
1896 iscsi_cid = iscsi_err->iscsi_conn_id;
1897 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1898 if (!bnx2i_conn) {
1899 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1900 return;
1901 }
1902
1903 err_mask64 = (0x1ULL << iscsi_err->completion_status);
1904
1905 if (err_mask64 & iscsi_error_mask) {
1906 need_recovery = 0;
1907 message = warn_notice;
1908 } else {
1909 need_recovery = 1;
1910 message = error_notice;
1911 }
1912
1913 switch (iscsi_err->completion_status) {
1914 case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
1915 strcpy(additional_notice, "hdr digest err");
1916 break;
1917 case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
1918 strcpy(additional_notice, "data digest err");
1919 break;
1920 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
1921 strcpy(additional_notice, "wrong opcode rcvd");
1922 break;
1923 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
1924 strcpy(additional_notice, "AHS len > 0 rcvd");
1925 break;
1926 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
1927 strcpy(additional_notice, "invalid ITT rcvd");
1928 break;
1929 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
1930 strcpy(additional_notice, "wrong StatSN rcvd");
1931 break;
1932 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
1933 strcpy(additional_notice, "wrong DataSN rcvd");
1934 break;
1935 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
1936 strcpy(additional_notice, "pend R2T violation");
1937 break;
1938 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
1939 strcpy(additional_notice, "ERL0, UO");
1940 break;
1941 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
1942 strcpy(additional_notice, "ERL0, U1");
1943 break;
1944 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
1945 strcpy(additional_notice, "ERL0, U2");
1946 break;
1947 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
1948 strcpy(additional_notice, "ERL0, U3");
1949 break;
1950 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
1951 strcpy(additional_notice, "ERL0, U4");
1952 break;
1953 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
1954 strcpy(additional_notice, "ERL0, U5");
1955 break;
1956 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
1957 strcpy(additional_notice, "ERL0, U6");
1958 break;
1959 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
1960 strcpy(additional_notice, "invalid resi len");
1961 break;
1962 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
1963 strcpy(additional_notice, "MRDSL violation");
1964 break;
1965 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
1966 strcpy(additional_notice, "F-bit not set");
1967 break;
1968 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
1969 strcpy(additional_notice, "invalid TTT");
1970 break;
1971 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
1972 strcpy(additional_notice, "invalid DataSN");
1973 break;
1974 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
1975 strcpy(additional_notice, "burst len violation");
1976 break;
1977 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
1978 strcpy(additional_notice, "buf offset violation");
1979 break;
1980 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
1981 strcpy(additional_notice, "invalid LUN field");
1982 break;
1983 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
1984 strcpy(additional_notice, "invalid R2TSN field");
1985 break;
1986#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
1987 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
1988 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
1989 strcpy(additional_notice, "invalid cmd len1");
1990 break;
1991#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
1992 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
1993 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
1994 strcpy(additional_notice, "invalid cmd len2");
1995 break;
1996 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
1997 strcpy(additional_notice,
1998 "pend r2t exceeds MaxOutstandingR2T value");
1999 break;
2000 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2001 strcpy(additional_notice, "TTT is rsvd");
2002 break;
2003 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2004 strcpy(additional_notice, "MBL violation");
2005 break;
2006#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
2007 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2008 case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2009 strcpy(additional_notice, "data seg len != 0");
2010 break;
2011 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2012 strcpy(additional_notice, "reject pdu len error");
2013 break;
2014 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2015 strcpy(additional_notice, "async pdu len error");
2016 break;
2017 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2018 strcpy(additional_notice, "nopin pdu len error");
2019 break;
2020#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
2021 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2022 case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2023 strcpy(additional_notice, "pend r2t in cleanup");
2024 break;
2025
2026 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2027 strcpy(additional_notice, "IP fragments rcvd");
2028 break;
2029 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2030 strcpy(additional_notice, "IP options error");
2031 break;
2032 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2033 strcpy(additional_notice, "urgent flag error");
2034 break;
2035 default:
2036 printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2037 iscsi_err->completion_status);
2038 }
2039
2040 if (need_recovery) {
2041 iscsi_conn_printk(KERN_ALERT,
2042 bnx2i_conn->cls_conn->dd_data,
2043 "bnx2i: %s - %s\n",
2044 message, additional_notice);
2045
2046 iscsi_conn_printk(KERN_ALERT,
2047 bnx2i_conn->cls_conn->dd_data,
2048 "conn_err - hostno %d conn %p, "
2049 "iscsi_cid %x cid %x\n",
2050 bnx2i_conn->hba->shost->host_no,
2051 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2052 bnx2i_conn->ep->ep_cid);
2053 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2054 } else
2055 if (!test_and_set_bit(iscsi_err->completion_status,
2056 (void *) &bnx2i_conn->violation_notified))
2057 iscsi_conn_printk(KERN_ALERT,
2058 bnx2i_conn->cls_conn->dd_data,
2059 "bnx2i: %s - %s\n",
2060 message, additional_notice);
2061}
2062
2063
2064/**
2065 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2066 * @hba: adapter structure pointer
2067 * @conn_destroy: conn destroy kcqe pointer
2068 *
2069 * handles connection destroy completion request.
2070 */
2071static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2072 struct iscsi_kcqe *conn_destroy)
2073{
2074 struct bnx2i_endpoint *ep;
2075
2076 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2077 if (!ep) {
2078 printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2079 "offload request, unexpected complection\n");
2080 return;
2081 }
2082
2083 if (hba != ep->hba) {
2084 printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2085 return;
2086 }
2087
2088 if (conn_destroy->completion_status) {
2089 printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2090 ep->state = EP_STATE_CLEANUP_FAILED;
2091 } else
2092 ep->state = EP_STATE_CLEANUP_CMPL;
2093 wake_up_interruptible(&ep->ofld_wait);
2094}
2095
2096
2097/**
2098 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2099 * @hba: adapter structure pointer
2100 * @ofld_kcqe: conn offload kcqe pointer
2101 *
2102 * handles initial connection offload completion, ep_connect() thread is
2103 * woken-up to continue with LLP connect process
2104 */
2105static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2106 struct iscsi_kcqe *ofld_kcqe)
2107{
2108 u32 cid_addr;
2109 struct bnx2i_endpoint *ep;
2110 u32 cid_num;
2111
2112 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2113 if (!ep) {
2114 printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2115 return;
2116 }
2117
2118 if (hba != ep->hba) {
2119 printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2120 return;
2121 }
2122
2123 if (ofld_kcqe->completion_status) {
2124 if (ofld_kcqe->completion_status ==
2125 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2126 printk(KERN_ALERT "bnx2i: unable to allocate"
2127 " iSCSI context resources\n");
2128 ep->state = EP_STATE_OFLD_FAILED;
2129 } else {
2130 ep->state = EP_STATE_OFLD_COMPL;
2131 cid_addr = ofld_kcqe->iscsi_conn_context_id;
2132 cid_num = bnx2i_get_cid_num(ep);
2133 ep->ep_cid = cid_addr;
2134 ep->qp.ctx_base = NULL;
2135 }
2136 wake_up_interruptible(&ep->ofld_wait);
2137}
2138
2139/**
2140 * bnx2i_indicate_kcqe - process iscsi conn update completion KCQE
2141 * @hba: adapter structure pointer
2142 * @update_kcqe: kcqe pointer
2143 *
2144 * Generic KCQ event handler/dispatcher
2145 */
2146static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2147 u32 num_cqe)
2148{
2149 struct bnx2i_hba *hba = context;
2150 int i = 0;
2151 struct iscsi_kcqe *ikcqe = NULL;
2152
2153 while (i < num_cqe) {
2154 ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2155
2156 if (ikcqe->op_code ==
2157 ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2158 bnx2i_fastpath_notification(hba, ikcqe);
2159 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2160 bnx2i_process_ofld_cmpl(hba, ikcqe);
2161 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2162 bnx2i_process_update_conn_cmpl(hba, ikcqe);
2163 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2164 if (ikcqe->completion_status !=
2165 ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2166 bnx2i_iscsi_license_error(hba, ikcqe->\
2167 completion_status);
2168 else {
2169 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2170 bnx2i_get_link_state(hba);
2171 printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2172 "ISCSI_INIT passed\n",
2173 (u8)hba->pcidev->bus->number,
2174 hba->pci_devno,
2175 (u8)hba->pci_func);
2176
2177
2178 }
2179 } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2180 bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2181 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2182 bnx2i_process_iscsi_error(hba, ikcqe);
2183 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2184 bnx2i_process_tcp_error(hba, ikcqe);
2185 else
2186 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2187 ikcqe->op_code);
2188 }
2189}
2190
2191
2192/**
2193 * bnx2i_indicate_netevent - Generic netdev event handler
2194 * @context: adapter structure pointer
2195 * @event: event type
2196 *
2197 * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
2198 * NETDEV_GOING_DOWN and NETDEV_CHANGE
2199 */
2200static void bnx2i_indicate_netevent(void *context, unsigned long event)
2201{
2202 struct bnx2i_hba *hba = context;
2203
2204 switch (event) {
2205 case NETDEV_UP:
2206 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2207 bnx2i_send_fw_iscsi_init_msg(hba);
2208 break;
2209 case NETDEV_DOWN:
2210 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2211 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2212 break;
2213 case NETDEV_GOING_DOWN:
2214 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2215 iscsi_host_for_each_session(hba->shost,
2216 bnx2i_drop_session);
2217 break;
2218 case NETDEV_CHANGE:
2219 bnx2i_get_link_state(hba);
2220 break;
2221 default:
2222 ;
2223 }
2224}
2225
2226
2227/**
2228 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2229 * @cm_sk: cnic sock structure pointer
2230 *
2231 * function callback exported via bnx2i - cnic driver interface to
2232 * indicate completion of option-2 TCP connect request.
2233 */
2234static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2235{
2236 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2237
2238 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2239 ep->state = EP_STATE_CONNECT_FAILED;
2240 else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2241 ep->state = EP_STATE_CONNECT_COMPL;
2242 else
2243 ep->state = EP_STATE_CONNECT_FAILED;
2244
2245 wake_up_interruptible(&ep->ofld_wait);
2246}
2247
2248
2249/**
2250 * bnx2i_cm_close_cmpl - process tcp conn close completion
2251 * @cm_sk: cnic sock structure pointer
2252 *
2253 * function callback exported via bnx2i - cnic driver interface to
2254 * indicate completion of option-2 graceful TCP connect shutdown
2255 */
2256static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2257{
2258 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2259
2260 ep->state = EP_STATE_DISCONN_COMPL;
2261 wake_up_interruptible(&ep->ofld_wait);
2262}
2263
2264
2265/**
2266 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2267 * @cm_sk: cnic sock structure pointer
2268 *
2269 * function callback exported via bnx2i - cnic driver interface to
2270 * indicate completion of option-2 abortive TCP connect termination
2271 */
2272static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2273{
2274 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2275
2276 ep->state = EP_STATE_DISCONN_COMPL;
2277 wake_up_interruptible(&ep->ofld_wait);
2278}
2279
2280
2281/**
2282 * bnx2i_cm_remote_close - process received TCP FIN
2283 * @hba: adapter structure pointer
2284 * @update_kcqe: kcqe pointer
2285 *
2286 * function callback exported via bnx2i - cnic driver interface to indicate
2287 * async TCP events such as FIN
2288 */
2289static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2290{
2291 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2292
2293 ep->state = EP_STATE_TCP_FIN_RCVD;
2294 if (ep->conn)
2295 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2296}
2297
2298/**
2299 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2300 * @hba: adapter structure pointer
2301 * @update_kcqe: kcqe pointer
2302 *
2303 * function callback exported via bnx2i - cnic driver interface to
2304 * indicate async TCP events (RST) sent by the peer.
2305 */
2306static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2307{
2308 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2309
2310 ep->state = EP_STATE_TCP_RST_RCVD;
2311 if (ep->conn)
2312 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2313}
2314
2315
/**
 * bnx2i_send_nl_mesg - relay a private message to the iscsi transport layer
 * @dev: cnic device the message pertains to
 * @msg_type: private message type code (passed through unchanged)
 * @buf: message payload
 * @buflen: payload length in bytes
 *
 * Looks up the hba bound to @dev and forwards the message through
 * iscsi_offload_mesg(); silently drops the message if no matching hba
 * exists, and only logs if the transport-layer send fails.
 */
static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
			       char *buf, u16 buflen)
{
	struct bnx2i_hba *hba;

	hba = bnx2i_find_hba_for_cnic(dev);
	if (!hba)
		return;

	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
			       msg_type, buf, buflen))
		printk(KERN_ALERT "bnx2i: private nl message send error\n");

}
2330
2331
2332/**
2333 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
2334 * carrying callback function pointers
2335 *
2336 */
2337struct cnic_ulp_ops bnx2i_cnic_cb = {
2338 .cnic_init = bnx2i_ulp_init,
2339 .cnic_exit = bnx2i_ulp_exit,
2340 .cnic_start = bnx2i_start,
2341 .cnic_stop = bnx2i_stop,
2342 .indicate_kcqes = bnx2i_indicate_kcqe,
2343 .indicate_netevent = bnx2i_indicate_netevent,
2344 .cm_connect_complete = bnx2i_cm_connect_cmpl,
2345 .cm_close_complete = bnx2i_cm_close_cmpl,
2346 .cm_abort_complete = bnx2i_cm_abort_cmpl,
2347 .cm_remote_close = bnx2i_cm_remote_close,
2348 .cm_remote_abort = bnx2i_cm_remote_abort,
2349 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2350 .owner = THIS_MODULE
2351};
2352
2353
2354/**
2355 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
2356 * @ep: bnx2i endpoint
2357 *
2358 * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 hosts these
2359 * register in BAR #0. Whereas in 57710 these register are accessed by
2360 * mapping BAR #1
2361 */
2362int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2363{
2364 u32 cid_num;
2365 u32 reg_off;
2366 u32 first_l4l5;
2367 u32 ctx_sz;
2368 u32 config2;
2369 resource_size_t reg_base;
2370
2371 cid_num = bnx2i_get_cid_num(ep);
2372
2373 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2374 reg_base = pci_resource_start(ep->hba->pcidev,
2375 BNX2X_DOORBELL_PCI_BAR);
2376 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
2377 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2378 goto arm_cq;
2379 }
2380
2381 reg_base = ep->hba->netdev->base_addr;
2382 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2383 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2384 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2385 first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2393 } else
2394 /* 5709 device in normal node and 5706/5708 devices */
2395 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2396
2397 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
2398 MB_KERNEL_CTX_SIZE);
2399 if (!ep->qp.ctx_base)
2400 return -ENOMEM;
2401
2402arm_cq:
2403 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
2404 return 0;
2405}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 000000000000..ae4b2d588fd3
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include "bnx2i.h"
15
/* all discovered bnx2i adapters; guarded by bnx2i_dev_lock below */
static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;	/* number of entries on adapter_list */
static int bnx2i_reg_device;	/* count of hba instances registered with cnic */

#define DRV_MODULE_NAME		"bnx2i"
#define DRV_MODULE_VERSION	"2.0.1d"
#define DRV_MODULE_RELDATE	"Mar 25, 2009"

/* banner printed once at module load time */
static char version[] __devinitdata =
	"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";


MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* protects adapter_list / adapter_count / bnx2i_reg_device */
static DEFINE_RWLOCK(bnx2i_dev_lock);

/* NOTE(review): the variables below are declared unsigned int but passed to
 * module_param() as "int" — confirm this matches the intended param type.
 */
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");

unsigned int en_tcp_dack = 1;
module_param(en_tcp_dack, int, 0664);
MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");

unsigned int error_mask1 = 0x00;
module_param(error_mask1, int, 0664);
MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");

unsigned int error_mask2 = 0x00;
module_param(error_mask2, int, 0664);
MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");

/* 0 (default) means "use driver default"; rounded to a power of 2 at init */
unsigned int sq_size;
module_param(sq_size, int, 0664);
MODULE_PARM_DESC(sq_size, "Configure SQ size");

unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
module_param(rq_size, int, 0664);
MODULE_PARM_DESC(rq_size, "Configure RQ size");

u64 iscsi_error_mask = 0x00;

/* forward declaration; defined later in this file */
static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ;
63
64
65/**
66 * bnx2i_identify_device - identifies NetXtreme II device type
67 * @hba: Adapter structure pointer
68 *
69 * This function identifies the NX2 device type and sets appropriate
70 * queue mailbox register access method, 5709 requires driver to
71 * access MBOX regs using *bin* mode
72 */
73void bnx2i_identify_device(struct bnx2i_hba *hba)
74{
75 hba->cnic_dev_type = 0;
76 if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
77 (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
78 set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
79 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
80 (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
81 set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
82 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
83 (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
84 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
85 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
86 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
87 hba->pci_did == PCI_DEVICE_ID_NX2_57711)
88 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
89}
90
91
92/**
93 * get_adapter_list_head - returns head of adapter list
94 */
95struct bnx2i_hba *get_adapter_list_head(void)
96{
97 struct bnx2i_hba *hba = NULL;
98 struct bnx2i_hba *tmp_hba;
99
100 if (!adapter_count)
101 goto hba_not_found;
102
103 read_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba;
107 break;
108 }
109 }
110 read_unlock(&bnx2i_dev_lock);
111hba_not_found:
112 return hba;
113}
114
115
116/**
117 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
118 * @cnic: pointer to cnic device instance
119 *
120 */
121struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{
123 struct bnx2i_hba *hba, *temp;
124
125 read_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock);
129 return hba;
130 }
131 }
132 read_unlock(&bnx2i_dev_lock);
133 return NULL;
134}
135
136
137/**
138 * bnx2i_start - cnic callback to initialize & start adapter instance
139 * @handle: transparent handle pointing to adapter structure
140 *
141 * This function maps adapter structure to pcidev structure and initiates
142 * firmware handshake to enable/initialize on chip iscsi components
143 * This bnx2i - cnic interface api callback is issued after following
144 * 2 conditions are met -
145 * a) underlying network interface is up (marked by event 'NETDEV_UP'
146 * from netdev
147 * b) bnx2i adapter instance is registered
148 */
149void bnx2i_start(void *handle)
150{
151#define BNX2I_INIT_POLL_TIME (1000 / HZ)
152 struct bnx2i_hba *hba = handle;
153 int i = HZ;
154
155 bnx2i_send_fw_iscsi_init_msg(hba);
156 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
157 msleep(BNX2I_INIT_POLL_TIME);
158}
159
160
161/**
162 * bnx2i_stop - cnic callback to shutdown adapter instance
163 * @handle: transparent handle pointing to adapter structure
164 *
165 * driver checks if adapter is already in shutdown mode, if not start
166 * the shutdown process
167 */
168void bnx2i_stop(void *handle)
169{
170 struct bnx2i_hba *hba = handle;
171
172 /* check if cleanup happened in GOING_DOWN context */
173 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
174 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
175 &hba->adapter_state))
176 iscsi_host_for_each_session(hba->shost,
177 bnx2i_drop_session);
178}
179
180/**
181 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
182 * @hba: Adapter instance to register
183 *
184 * registers bnx2i adapter instance with the cnic driver while holding the
185 * adapter structure lock
186 */
187void bnx2i_register_device(struct bnx2i_hba *hba)
188{
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return;
192 }
193
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195
196 spin_lock(&hba->lock);
197 bnx2i_reg_device++;
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201}
202
203
204/**
205 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
206 *
207 * registers all bnx2i adapter instances with the cnic driver while holding
208 * the global resource lock
209 */
210void bnx2i_reg_dev_all(void)
211{
212 struct bnx2i_hba *hba, *temp;
213
214 read_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock);
218}
219
220
/**
 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
 * @hba: Adapter instance to unregister
 *
 * unregisters bnx2i adapter instance with the cnic driver while holding
 * the adapter structure lock
 */
static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
{
	/* skip if connections are still offloaded, if the hba was never
	 * registered with cnic, or if shutdown is already in progress
	 */
	if (hba->ofld_conns_active ||
	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
		return;

	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);

	/* global registration count is updated under the per-hba lock */
	spin_lock(&hba->lock);
	bnx2i_reg_device--;
	spin_unlock(&hba->lock);

	/* ep_disconnect could come before NETDEV_DOWN, driver won't
	 * see NETDEV_DOWN as it already unregistered itself.
	 */
	hba->adapter_state = 0;
	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
247
248/**
249 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
250 *
251 * unregisters all bnx2i adapter instances with the cnic driver while holding
252 * the global resource lock
253 */
254void bnx2i_unreg_dev_all(void)
255{
256 struct bnx2i_hba *hba, *temp;
257
258 read_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock);
262}
263
264
/**
 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
 * @hba: bnx2i adapter instance
 * @cnic: cnic device handle
 *
 * Global resource lock and host adapter lock is held during critical sections
 * below. This routine is called from cnic_register_driver() context and
 * work horse thread which does majority of device specific initialization
 */
static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
{
	int rc;

	/* register with cnic only when other instances are already
	 * registered (bnx2i_reg_device != 0) and this one is not
	 */
	read_lock(&bnx2i_dev_lock);
	if (bnx2i_reg_device &&
	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
		if (rc)		/* duplicate registration */
			printk(KERN_ERR "bnx2i- dev reg failed\n");
		/* NOTE(review): rc is only logged; the code below proceeds
		 * as if registration succeeded — confirm this is intended
		 */

		spin_lock(&hba->lock);
		bnx2i_reg_device++;
		hba->age++;	/* bump generation so stale endpoints detect re-registration */
		spin_unlock(&hba->lock);

		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
	}
	read_unlock(&bnx2i_dev_lock);

	/* publish the new hba on the global adapter list */
	write_lock(&bnx2i_dev_lock);
	list_add_tail(&hba->link, &adapter_list);
	adapter_count++;
	write_unlock(&bnx2i_dev_lock);
	return 0;
}
300
301
302/**
303 * bnx2i_ulp_init - initialize an adapter instance
304 * @dev: cnic device handle
305 *
306 * Called from cnic_register_driver() context to initialize all enumerated
307 * cnic devices. This routine allocate adapter structure and other
308 * device specific resources.
309 */
310void bnx2i_ulp_init(struct cnic_dev *dev)
311{
312 struct bnx2i_hba *hba;
313
314 /* Allocate a HBA structure for this device */
315 hba = bnx2i_alloc_hba(dev);
316 if (!hba) {
317 printk(KERN_ERR "bnx2i init: hba initialization failed\n");
318 return;
319 }
320
321 /* Get PCI related information and update hba struct members */
322 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
323 if (bnx2i_init_one(hba, dev)) {
324 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
325 bnx2i_free_hba(hba);
326 } else
327 hba->cnic = dev;
328}
329
330
/**
 * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
 * @dev: cnic device handle
 *
 * Looks up the hba bound to @dev, unlinks it from the global adapter list,
 * unregisters it from cnic if needed, and frees the hba.
 */
void bnx2i_ulp_exit(struct cnic_dev *dev)
{
	struct bnx2i_hba *hba;

	hba = bnx2i_find_hba_for_cnic(dev);
	if (!hba) {
		printk(KERN_INFO "bnx2i_ulp_exit: hba not "
				 "found, dev 0x%p\n", dev);
		return;
	}
	/* hold the global list lock across unlink + cnic unregister so no
	 * other path observes a half-torn-down hba
	 */
	write_lock(&bnx2i_dev_lock);
	list_del_init(&hba->link);
	adapter_count--;

	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);

		/* global registration count is updated under the hba lock */
		spin_lock(&hba->lock);
		bnx2i_reg_device--;
		spin_unlock(&hba->lock);
	}
	write_unlock(&bnx2i_dev_lock);

	bnx2i_free_hba(hba);
}
362
363
364/**
365 * bnx2i_mod_init - module init entry point
366 *
367 * initialize any driver wide global data structures such as endpoint pool,
368 * tcp port manager/queue, sysfs. finally driver will register itself
369 * with the cnic module
370 */
371static int __init bnx2i_mod_init(void)
372{
373 int err;
374
375 printk(KERN_INFO "%s", version);
376
377 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size);
379
380 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) {
383 printk(KERN_ERR "Could not register bnx2i transport.\n");
384 err = -ENOMEM;
385 goto out;
386 }
387
388 err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
389 if (err) {
390 printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
391 goto unreg_xport;
392 }
393
394 return 0;
395
396unreg_xport:
397 iscsi_unregister_transport(&bnx2i_iscsi_transport);
398out:
399 return err;
400}
401
402
/**
 * bnx2i_mod_exit - module cleanup/exit entry point
 *
 * Global resource lock and host adapter lock is held during critical sections
 * in this function. Driver will browse through the adapter list, cleans-up
 * each instance, unregisters iscsi transport name and finally driver will
 * unregister itself with the cnic module
 */
static void __exit bnx2i_mod_exit(void)
{
	struct bnx2i_hba *hba;

	write_lock(&bnx2i_dev_lock);
	while (!list_empty(&adapter_list)) {
		/* detach the first remaining hba from the global list */
		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
		list_del(&hba->link);
		adapter_count--;

		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
			bnx2i_reg_device--;
		}

		/* bnx2i_free_hba() is called with the list lock dropped;
		 * the entry was already unlinked above, so releasing and
		 * re-acquiring the lock around the call is safe
		 */
		write_unlock(&bnx2i_dev_lock);
		bnx2i_free_hba(hba);
		write_lock(&bnx2i_dev_lock);
	}
	write_unlock(&bnx2i_dev_lock);

	iscsi_unregister_transport(&bnx2i_iscsi_transport);
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}
436
437module_init(bnx2i_mod_init);
438module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 000000000000..f7412196f2f8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */
14
15#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h>
17#include "bnx2i.h"
18
/* scsi transport template returned by iscsi_register_transport() */
struct scsi_transport_template *bnx2i_scsi_xport_template;
/* transport interface handed to the iscsi transport class */
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/*
 * Global endpoint resource info
 */
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
27
28
29static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
30{
31 int retval = 0;
32
33 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
34 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
35 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
36 retval = -EPERM;
37 return retval;
38}
39
40/**
41 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
42 * @cmd: iscsi cmd struct pointer
43 * @buf_off: absolute buffer offset
44 * @start_bd_off: u32 pointer to return the offset within the BD
45 * indicated by 'start_bd_idx' on which 'buf_off' falls
46 * @start_bd_idx: index of the BD on which 'buf_off' falls
47 *
48 * identifies & marks various bd info for scsi command's imm data,
49 * unsolicited data and the first solicited data seq.
50 */
51static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
52 u32 *start_bd_off, u32 *start_bd_idx)
53{
54 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
55 u32 cur_offset = 0;
56 u32 cur_bd_idx = 0;
57
58 if (buf_off) {
59 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
60 cur_offset += bd_tbl->buffer_length;
61 cur_bd_idx++;
62 bd_tbl++;
63 }
64 }
65
66 *start_bd_off = buf_off - cur_offset;
67 *start_bd_idx = cur_bd_idx;
68}
69
/**
 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
 * @task: transport layer's cmd struct pointer
 *
 * identifies & marks various bd info for scsi command's immediate data,
 * unsolicited data and first solicited data seq which includes BD start
 * index & BD buf off. This function takes into account iscsi parameters
 * such as immediate data and unsolicited data being supported on this
 * connection.
 */
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & IntialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data is always sent first, starting at offset 0 */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	/* unsolicited data follows the immediate data */
	if (iscsi_task_has_unsol_data(task)) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	/* whatever remains is the first solicited data sequence */
	if (buffer_offset != cmd_len) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		/* sanity check: bookmarks must lie inside the BD table */
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i, cmd->io_tbl.bd_tbl[i].\
						  buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}
128
129
130
/**
 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
 * @hba: adapter instance
 * @cmd: iscsi cmd struct pointer
 *
 * DMA-maps the command's scatter-gather list and fills one BD entry per
 * mapped segment. The first and last BDs are flagged so the chip can
 * recognize the chain boundaries. Returns the number of BDs written.
 */
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	/* NOTE(review): scsi_dma_map() can return a negative value on
	 * mapping failure; that case is not checked here (the loop would
	 * simply not run and 0 is returned) — confirm this is intended.
	 */
	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		/* split the 64-bit DMA address into lo/hi BD fields */
		addr = (u64) sg_dma_address(sg);
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	/* the mapped total must equal the request's buffer length */
	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}
174
175/**
176 * bnx2i_iscsi_map_sg_list - maps SG list
177 * @cmd: iscsi cmd struct pointer
178 *
179 * creates BD list table for the command
180 */
181static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
182{
183 int bd_count;
184
185 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
186 if (!bd_count) {
187 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
188
189 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
190 bd[0].buffer_length = bd[0].flags = 0;
191 }
192 cmd->io_tbl.bd_valid = bd_count;
193}
194
195
196/**
197 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
198 * @cmd: iscsi cmd struct pointer
199 *
200 * unmap IO buffers and invalidate the BD table
201 */
202void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
203{
204 struct scsi_cmnd *sc = cmd->scsi_cmd;
205
206 if (cmd->io_tbl.bd_valid && sc) {
207 scsi_dma_unmap(sc);
208 cmd->io_tbl.bd_valid = 0;
209 }
210}
211
212static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
213{
214 memset(&cmd->req, 0x00, sizeof(cmd->req));
215 cmd->req.op_code = 0xFF;
216 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
217 cmd->req.bd_list_addr_hi =
218 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
219
220}
221
222
223/**
224 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
225 * @hba: pointer to adapter instance
226 * @conn: pointer to iscsi connection
227 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
228 *
229 * update iscsi cid table entry with connection pointer. This enables
230 * driver to quickly get hold of connection structure pointer in
231 * completion/interrupt thread using iscsi context ID
232 */
233static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
234 struct bnx2i_conn *bnx2i_conn,
235 u32 iscsi_cid)
236{
237 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
238 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
239 "conn bind - entry #%d not free\n", iscsi_cid);
240 return -EBUSY;
241 }
242
243 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
244 return 0;
245}
246
247
248/**
249 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
250 * @hba: pointer to adapter instance
251 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
252 */
253struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
254 u16 iscsi_cid)
255{
256 if (!hba->cid_que.conn_cid_tbl) {
257 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
258 return NULL;
259
260 } else if (iscsi_cid >= hba->max_active_conns) {
261 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
262 return NULL;
263 }
264 return hba->cid_que.conn_cid_tbl[iscsi_cid];
265}
266
267
268/**
269 * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool
270 * @hba: pointer to adapter instance
271 */
272static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
273{
274 int idx;
275
276 if (!hba->cid_que.cid_free_cnt)
277 return -1;
278
279 idx = hba->cid_que.cid_q_cons_idx;
280 hba->cid_que.cid_q_cons_idx++;
281 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
282 hba->cid_que.cid_q_cons_idx = 0;
283
284 hba->cid_que.cid_free_cnt--;
285 return hba->cid_que.cid_que[idx];
286}
287
288
289/**
290 * bnx2i_free_iscsi_cid - returns tcp port to free list
291 * @hba: pointer to adapter instance
292 * @iscsi_cid: iscsi context ID to free
293 */
294static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
295{
296 int idx;
297
298 if (iscsi_cid == (u16) -1)
299 return;
300
301 hba->cid_que.cid_free_cnt++;
302
303 idx = hba->cid_que.cid_q_prod_idx;
304 hba->cid_que.cid_que[idx] = iscsi_cid;
305 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
306 hba->cid_que.cid_q_prod_idx++;
307 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
308 hba->cid_que.cid_q_prod_idx = 0;
309}
310
311
312/**
313 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
314 * @hba: pointer to adapter instance
315 *
316 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
317 * and initialize table attributes
318 */
319static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
320{
321 int mem_size;
322 int i;
323
324 mem_size = hba->max_active_conns * sizeof(u32);
325 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
326
327 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
328 if (!hba->cid_que.cid_que_base)
329 return -ENOMEM;
330
331 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
332 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
333 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
334 if (!hba->cid_que.conn_cid_tbl) {
335 kfree(hba->cid_que.cid_que_base);
336 hba->cid_que.cid_que_base = NULL;
337 return -ENOMEM;
338 }
339
340 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
341 hba->cid_que.cid_q_prod_idx = 0;
342 hba->cid_que.cid_q_cons_idx = 0;
343 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
344 hba->cid_que.cid_free_cnt = hba->max_active_conns;
345
346 for (i = 0; i < hba->max_active_conns; i++) {
347 hba->cid_que.cid_que[i] = i;
348 hba->cid_que.conn_cid_tbl[i] = NULL;
349 }
350 return 0;
351}
352
353
354/**
355 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
356 * @hba: pointer to adapter instance
357 */
358static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
359{
360 kfree(hba->cid_que.cid_que_base);
361 hba->cid_que.cid_que_base = NULL;
362
363 kfree(hba->cid_que.conn_cid_tbl);
364 hba->cid_que.conn_cid_tbl = NULL;
365}
366
367
/**
 * bnx2i_alloc_ep - allocates ep structure from global pool
 * @hba: pointer to adapter instance
 *
 * Allocates a fresh iscsi endpoint via iscsi_create_endpoint() and
 * initializes its bnx2i-private portion.
 *
 * NOTE(review): the original header claimed 'bnx2i_resc_lock' is held
 * here, but this function takes no lock; ofld_conns_active++ below is
 * unserialized — confirm callers provide the needed exclusion.
 */
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
	struct iscsi_endpoint *ep;
	struct bnx2i_endpoint *bnx2i_ep;

	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
	if (!ep) {
		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	INIT_LIST_HEAD(&bnx2i_ep->link);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba = hba;
	/* record hba generation so a later re-registration is detectable */
	bnx2i_ep->hba_age = hba->age;
	hba->ofld_conns_active++;
	init_waitqueue_head(&bnx2i_ep->ofld_wait);
	return ep;
}
396
397
/**
 * bnx2i_free_ep - free endpoint
 * @ep: pointer to iscsi endpoint structure
 *
 * Under the global resource lock: marks the endpoint idle, drops the
 * hba's active offload count, returns its cid to the free pool and
 * severs the conn<->ep links, then destroys the endpoint object.
 */
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	unsigned long flags;

	spin_lock_irqsave(&bnx2i_resc_lock, flags);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba->ofld_conns_active--;

	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
	/* break the bidirectional conn <-> ep association */
	if (bnx2i_ep->conn) {
		bnx2i_ep->conn->ep = NULL;
		bnx2i_ep->conn = NULL;
	}

	bnx2i_ep->hba = NULL;
	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
	iscsi_destroy_endpoint(ep);
}
421
422
423/**
424 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
425 * @hba: adapter instance pointer
426 * @session: iscsi session pointer
427 * @cmd: iscsi command structure
428 */
429static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
430 struct bnx2i_cmd *cmd)
431{
432 struct io_bdt *io = &cmd->io_tbl;
433 struct iscsi_bd *bd;
434
435 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
436 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
437 &io->bd_tbl_dma, GFP_KERNEL);
438 if (!io->bd_tbl) {
439 iscsi_session_printk(KERN_ERR, session, "Could not "
440 "allocate bdt.\n");
441 return -ENOMEM;
442 }
443 io->bd_valid = 0;
444 return 0;
445}
446
447/**
448 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
449 * @hba: adapter instance pointer
450 * @session: iscsi session pointer
451 * @cmd: iscsi command structure
452 */
453static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
454 struct iscsi_session *session)
455{
456 int i;
457
458 for (i = 0; i < session->cmds_max; i++) {
459 struct iscsi_task *task = session->cmds[i];
460 struct bnx2i_cmd *cmd = task->dd_data;
461
462 if (cmd->io_tbl.bd_tbl)
463 dma_free_coherent(&hba->pcidev->dev,
464 ISCSI_MAX_BDS_PER_CMD *
465 sizeof(struct iscsi_bd),
466 cmd->io_tbl.bd_tbl,
467 cmd->io_tbl.bd_tbl_dma);
468 }
469
470}
471
472
473/**
474 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
475 * @hba: adapter instance pointer
476 * @session: iscsi session pointer
477 */
478static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
479 struct iscsi_session *session)
480{
481 int i;
482
483 for (i = 0; i < session->cmds_max; i++) {
484 struct iscsi_task *task = session->cmds[i];
485 struct bnx2i_cmd *cmd = task->dd_data;
486
487 /* Anil */
488 task->hdr = &cmd->hdr;
489 task->hdr_max = sizeof(struct iscsi_hdr);
490
491 if (bnx2i_alloc_bdt(hba, session, cmd))
492 goto free_bdts;
493 }
494
495 return 0;
496
497free_bdts:
498 bnx2i_destroy_cmd_pool(hba, session);
499 return -ENOMEM;
500}
501
502
503/**
504 * bnx2i_setup_mp_bdt - allocate BD table resources
505 * @hba: pointer to adapter structure
506 *
507 * Allocate memory for dummy buffer and associated BD
508 * table to be used by middle path (MP) requests
509 */
510static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
511{
512 int rc = 0;
513 struct iscsi_bd *mp_bdt;
514 u64 addr;
515
516 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
517 &hba->mp_bd_dma, GFP_KERNEL);
518 if (!hba->mp_bd_tbl) {
519 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
520 rc = -1;
521 goto out;
522 }
523
524 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
525 &hba->dummy_buf_dma, GFP_KERNEL);
526 if (!hba->dummy_buffer) {
527 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
528 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
529 hba->mp_bd_tbl, hba->mp_bd_dma);
530 hba->mp_bd_tbl = NULL;
531 rc = -1;
532 goto out;
533 }
534
535 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
536 addr = (unsigned long) hba->dummy_buf_dma;
537 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
538 mp_bdt->buffer_addr_hi = addr >> 32;
539 mp_bdt->buffer_length = PAGE_SIZE;
540 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
541 ISCSI_BD_FIRST_IN_BD_CHAIN;
542out:
543 return rc;
544}
545
546
547/**
548 * bnx2i_free_mp_bdt - releases ITT back to free pool
549 * @hba: pointer to adapter instance
550 *
551 * free MP dummy buffer and associated BD table
552 */
553static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
554{
555 if (hba->mp_bd_tbl) {
556 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
557 hba->mp_bd_tbl, hba->mp_bd_dma);
558 hba->mp_bd_tbl = NULL;
559 }
560 if (hba->dummy_buffer) {
561 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
562 hba->dummy_buffer, hba->dummy_buf_dma);
563 hba->dummy_buffer = NULL;
564 }
565 return;
566}
567
/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: iscsi class session pointer
 *
 * This notifies iscsid that there is a error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
583
/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * Appends @ep to the hba's destroy list under the ep list write lock.
 * Always returns 0.
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
599
/**
 * bnx2i_ep_destroy_list_del - remove an entry from the EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * Unlinks @ep from the hba's destroy list under the ep list write lock.
 * (Original kernel-doc said "add an entry" — this deletes.) Always
 * returns 0.
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}
617
/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * Appends @ep to the hba's pending-offload list under the ep list write
 * lock. Always returns 0.
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
633
/**
 * bnx2i_ep_ofld_list_del - remove an entry from the ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport indentifier) structure
 *
 * Unlinks @ep from the hba's pending-offload list under the ep list
 * write lock. (Original kernel-doc said "add an entry" — this deletes.)
 * Always returns 0.
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}
649
650
651/**
652 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
653 *
654 * @hba: pointer to adapter instance
655 * @iscsi_cid: iscsi context ID to find
656 *
657 */
658struct bnx2i_endpoint *
659bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
660{
661 struct list_head *list;
662 struct list_head *tmp;
663 struct bnx2i_endpoint *ep;
664
665 read_lock_bh(&hba->ep_rdwr_lock);
666 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
667 ep = (struct bnx2i_endpoint *)list;
668
669 if (ep->ep_iscsi_cid == iscsi_cid)
670 break;
671 ep = NULL;
672 }
673 read_unlock_bh(&hba->ep_rdwr_lock);
674
675 if (!ep)
676 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
677 return ep;
678}
679
680
681/**
682 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
683 * @hba: pointer to adapter instance
684 * @iscsi_cid: iscsi context ID to find
685 *
686 */
687struct bnx2i_endpoint *
688bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
689{
690 struct list_head *list;
691 struct list_head *tmp;
692 struct bnx2i_endpoint *ep;
693
694 read_lock_bh(&hba->ep_rdwr_lock);
695 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
696 ep = (struct bnx2i_endpoint *)list;
697
698 if (ep->ep_iscsi_cid == iscsi_cid)
699 break;
700 ep = NULL;
701 }
702 read_unlock_bh(&hba->ep_rdwr_lock);
703
704 if (!ep)
705 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
706
707 return ep;
708}
709
710/**
711 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
712 * @hba: pointer to adapter instance
713 * @shost: scsi host pointer
714 *
715 * Initializes 'can_queue' parameter based on how many outstanding commands
716 * the device can handle. Each device 5708/5709/57710 has different
717 * capabilities
718 */
719static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
720 struct Scsi_Host *shost)
721{
722 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
723 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
724 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
725 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
726 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
727 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
728 else
729 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
730}
731
732
733/**
734 * bnx2i_alloc_hba - allocate and init adapter instance
735 * @cnic: cnic device pointer
736 *
737 * allocate & initialize adapter structure and call other
738 * support routines to do per adapter initialization
739 */
740struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
741{
742 struct Scsi_Host *shost;
743 struct bnx2i_hba *hba;
744
745 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
746 if (!shost)
747 return NULL;
748 shost->dma_boundary = cnic->pcidev->dma_mask;
749 shost->transportt = bnx2i_scsi_xport_template;
750 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
751 shost->max_channel = 0;
752 shost->max_lun = 512;
753 shost->max_cmd_len = 16;
754
755 hba = iscsi_host_priv(shost);
756 hba->shost = shost;
757 hba->netdev = cnic->netdev;
758 /* Get PCI related information and update hba struct members */
759 hba->pcidev = cnic->pcidev;
760 pci_dev_get(hba->pcidev);
761 hba->pci_did = hba->pcidev->device;
762 hba->pci_vid = hba->pcidev->vendor;
763 hba->pci_sdid = hba->pcidev->subsystem_device;
764 hba->pci_svid = hba->pcidev->subsystem_vendor;
765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
767 bnx2i_identify_device(hba);
768
769 bnx2i_identify_device(hba);
770 bnx2i_setup_host_queue_size(hba, shost);
771
772 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
773 hba->regview = ioremap_nocache(hba->netdev->base_addr,
774 BNX2_MQ_CONFIG2);
775 if (!hba->regview)
776 goto ioreg_map_err;
777 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
778 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
779 if (!hba->regview)
780 goto ioreg_map_err;
781 }
782
783 if (bnx2i_setup_mp_bdt(hba))
784 goto mp_bdt_mem_err;
785
786 INIT_LIST_HEAD(&hba->ep_ofld_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock);
789
790 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
791
792 /* different values for 5708/5709/57710 */
793 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
794
795 if (bnx2i_setup_free_cid_que(hba))
796 goto cid_que_err;
797
798 /* SQ/RQ/CQ size can be changed via sysfx interface */
799 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
800 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
801 hba->max_sqes = sq_size;
802 else
803 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
804 } else { /* 5706/5708/5709 */
805 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
806 hba->max_sqes = sq_size;
807 else
808 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
809 }
810
811 hba->max_rqes = rq_size;
812 hba->max_cqes = hba->max_sqes + rq_size;
813 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
814 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
815 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
816 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
817 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
818
819 hba->num_ccell = hba->max_sqes / 2;
820
821 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock);
823
824 if (iscsi_host_add(shost, &hba->pcidev->dev))
825 goto free_dump_mem;
826 return hba;
827
828free_dump_mem:
829 bnx2i_release_free_cid_que(hba);
830cid_que_err:
831 bnx2i_free_mp_bdt(hba);
832mp_bdt_mem_err:
833 if (hba->regview) {
834 iounmap(hba->regview);
835 hba->regview = NULL;
836 }
837ioreg_map_err:
838 pci_dev_put(hba->pcidev);
839 scsi_host_put(shost);
840 return NULL;
841}
842
843/**
844 * bnx2i_free_hba- releases hba structure and resources held by the adapter
845 * @hba: pointer to adapter instance
846 *
847 * free adapter structure and call various cleanup routines.
848 */
849void bnx2i_free_hba(struct bnx2i_hba *hba)
850{
851 struct Scsi_Host *shost = hba->shost;
852
853 iscsi_host_remove(shost);
854 INIT_LIST_HEAD(&hba->ep_ofld_list);
855 INIT_LIST_HEAD(&hba->ep_destroy_list);
856 pci_dev_put(hba->pcidev);
857
858 if (hba->regview) {
859 iounmap(hba->regview);
860 hba->regview = NULL;
861 }
862 bnx2i_free_mp_bdt(hba);
863 bnx2i_release_free_cid_que(hba);
864 iscsi_host_free(shost);
865}
866
867/**
868 * bnx2i_conn_free_login_resources - free DMA resources used for login process
869 * @hba: pointer to adapter instance
870 * @bnx2i_conn: iscsi connection pointer
871 *
872 * Login related resources, mostly BDT & payload DMA memory is freed
873 */
874static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
875 struct bnx2i_conn *bnx2i_conn)
876{
877 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
878 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
879 bnx2i_conn->gen_pdu.resp_bd_tbl,
880 bnx2i_conn->gen_pdu.resp_bd_dma);
881 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
882 }
883
884 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
885 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
886 bnx2i_conn->gen_pdu.req_bd_tbl,
887 bnx2i_conn->gen_pdu.req_bd_dma);
888 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
889 }
890
891 if (bnx2i_conn->gen_pdu.resp_buf) {
892 dma_free_coherent(&hba->pcidev->dev,
893 ISCSI_DEF_MAX_RECV_SEG_LEN,
894 bnx2i_conn->gen_pdu.resp_buf,
895 bnx2i_conn->gen_pdu.resp_dma_addr);
896 bnx2i_conn->gen_pdu.resp_buf = NULL;
897 }
898
899 if (bnx2i_conn->gen_pdu.req_buf) {
900 dma_free_coherent(&hba->pcidev->dev,
901 ISCSI_DEF_MAX_RECV_SEG_LEN,
902 bnx2i_conn->gen_pdu.req_buf,
903 bnx2i_conn->gen_pdu.req_dma_addr);
904 bnx2i_conn->gen_pdu.req_buf = NULL;
905 }
906}
907
908/**
909 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
910 * @hba: pointer to adapter instance
911 * @bnx2i_conn: iscsi connection pointer
912 *
913 * Mgmt task DNA resources are allocated in this routine.
914 */
915static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
916 struct bnx2i_conn *bnx2i_conn)
917{
918 /* Allocate memory for login request/response buffers */
919 bnx2i_conn->gen_pdu.req_buf =
920 dma_alloc_coherent(&hba->pcidev->dev,
921 ISCSI_DEF_MAX_RECV_SEG_LEN,
922 &bnx2i_conn->gen_pdu.req_dma_addr,
923 GFP_KERNEL);
924 if (bnx2i_conn->gen_pdu.req_buf == NULL)
925 goto login_req_buf_failure;
926
927 bnx2i_conn->gen_pdu.req_buf_size = 0;
928 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
929
930 bnx2i_conn->gen_pdu.resp_buf =
931 dma_alloc_coherent(&hba->pcidev->dev,
932 ISCSI_DEF_MAX_RECV_SEG_LEN,
933 &bnx2i_conn->gen_pdu.resp_dma_addr,
934 GFP_KERNEL);
935 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
936 goto login_resp_buf_failure;
937
938 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
939 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
940
941 bnx2i_conn->gen_pdu.req_bd_tbl =
942 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
943 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
944 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
945 goto login_req_bd_tbl_failure;
946
947 bnx2i_conn->gen_pdu.resp_bd_tbl =
948 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
949 &bnx2i_conn->gen_pdu.resp_bd_dma,
950 GFP_KERNEL);
951 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
952 goto login_resp_bd_tbl_failure;
953
954 return 0;
955
956login_resp_bd_tbl_failure:
957 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
958 bnx2i_conn->gen_pdu.req_bd_tbl,
959 bnx2i_conn->gen_pdu.req_bd_dma);
960 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
961
962login_req_bd_tbl_failure:
963 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.resp_buf,
965 bnx2i_conn->gen_pdu.resp_dma_addr);
966 bnx2i_conn->gen_pdu.resp_buf = NULL;
967login_resp_buf_failure:
968 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
969 bnx2i_conn->gen_pdu.req_buf,
970 bnx2i_conn->gen_pdu.req_dma_addr);
971 bnx2i_conn->gen_pdu.req_buf = NULL;
972login_req_buf_failure:
973 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
974 "login resource alloc failed!!\n");
975 return -ENOMEM;
976
977}
978
979
980/**
981 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
982 * @bnx2i_conn: iscsi connection pointer
983 *
984 * Allocates buffers and BD tables before shipping requests to cnic
985 * for PDUs prepared by 'iscsid' daemon
986 */
987static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
988{
989 struct iscsi_bd *bd_tbl;
990
991 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
992
993 bd_tbl->buffer_addr_hi =
994 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
995 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
996 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
997 bnx2i_conn->gen_pdu.req_buf;
998 bd_tbl->reserved0 = 0;
999 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1000 ISCSI_BD_FIRST_IN_BD_CHAIN;
1001
1002 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1003 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1004 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1005 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1006 bd_tbl->reserved0 = 0;
1007 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1008 ISCSI_BD_FIRST_IN_BD_CHAIN;
1009}
1010
1011
1012/**
1013 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1014 * @task: transport layer task pointer
1015 *
1016 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1017 * Nop-out and Logout requests flow through this path.
1018 */
1019static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1020{
1021 struct bnx2i_cmd *cmd = task->dd_data;
1022 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1023 int rc = 0;
1024 char *buf;
1025 int data_len;
1026
1027 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1028 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1029 case ISCSI_OP_LOGIN:
1030 bnx2i_send_iscsi_login(bnx2i_conn, task);
1031 break;
1032 case ISCSI_OP_NOOP_OUT:
1033 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1034 buf = bnx2i_conn->gen_pdu.req_buf;
1035 if (data_len)
1036 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1037 RESERVED_ITT,
1038 buf, data_len, 1);
1039 else
1040 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1041 RESERVED_ITT,
1042 NULL, 0, 1);
1043 break;
1044 case ISCSI_OP_LOGOUT:
1045 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1046 break;
1047 case ISCSI_OP_SCSI_TMFUNC:
1048 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1049 break;
1050 default:
1051 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1052 "send_gen: unsupported op 0x%x\n",
1053 task->hdr->opcode);
1054 }
1055 return rc;
1056}
1057
1058
1059/**********************************************************************
1060 * SCSI-ML Interface
1061 **********************************************************************/
1062
1063/**
1064 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1065 * @sc: SCSI-ML command pointer
1066 * @cmd: iscsi cmd pointer
1067 */
1068static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1069{
1070 u32 dword;
1071 int lpcnt;
1072 u8 *srcp;
1073 u32 *dstp;
1074 u32 scsi_lun[2];
1075
1076 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1077 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1078 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1079
1080 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1081 srcp = (u8 *) sc->cmnd;
1082 dstp = (u32 *) cmd->req.cdb;
1083 while (lpcnt--) {
1084 memcpy(&dword, (const void *) srcp, 4);
1085 *dstp = cpu_to_be32(dword);
1086 srcp += 4;
1087 dstp++;
1088 }
1089 if (sc->cmd_len & 0x3) {
1090 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1091 *dstp = cpu_to_be32(dword);
1092 }
1093}
1094
/*
 * bnx2i_cleanup_task - reclaim resources held by a task being torn down.
 * @task: transport layer task pointer
 *
 * Called by libiscsi with conn->session->lock held. For tasks that were
 * actually sent to the chip, asks the firmware to clean up the command
 * context before unmapping the SG list.
 */
static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		/* the session lock is dropped while waiting for the firmware
		 * cleanup completion and re-taken afterwards; other session
		 * state may change in between */
		spin_unlock_bh(&conn->session->lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}
1119
1120/**
1121 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1122 * @conn: transport layer conn structure pointer
1123 * @task: transport layer command structure pointer
1124 */
1125static int
1126bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1127{
1128 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1129 struct bnx2i_cmd *cmd = task->dd_data;
1130
1131 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1132
1133 bnx2i_setup_cmd_wqe_template(cmd);
1134 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1135 if (task->data_count) {
1136 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1137 task->data_count);
1138 bnx2i_conn->gen_pdu.req_wr_ptr =
1139 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1140 }
1141 cmd->conn = conn->dd_data;
1142 cmd->scsi_cmd = NULL;
1143 return bnx2i_iscsi_send_generic_request(task);
1144}
1145
1146/**
1147 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1148 * @task: transport layer command structure pointer
1149 *
1150 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
1151 */
1152static int bnx2i_task_xmit(struct iscsi_task *task)
1153{
1154 struct iscsi_conn *conn = task->conn;
1155 struct iscsi_session *session = conn->session;
1156 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1157 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1158 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1159 struct scsi_cmnd *sc = task->sc;
1160 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN;
1168
1169 /*
1170 * If there is no scsi_cmnd this must be a mgmt task
1171 */
1172 if (!sc)
1173 return bnx2i_mtask_xmit(conn, task);
1174
1175 bnx2i_setup_cmd_wqe_template(cmd);
1176 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1177 cmd->conn = bnx2i_conn;
1178 cmd->scsi_cmd = sc;
1179 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1180 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1181
1182 bnx2i_iscsi_map_sg_list(cmd);
1183 bnx2i_cpy_scsi_cdb(sc, cmd);
1184
1185 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1186 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1187 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1188 cmd->req.itt = task->itt |
1189 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1190 bnx2i_setup_write_cmd_bd_info(task);
1191 } else {
1192 if (scsi_bufflen(sc))
1193 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1194 cmd->req.itt = task->itt |
1195 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1196 }
1197
1198 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1199 if (!cmd->io_tbl.bd_valid) {
1200 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1201 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1202 cmd->req.num_bds = 1;
1203 }
1204
1205 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1206 return 0;
1207}
1208
1209/**
1210 * bnx2i_session_create - create a new iscsi session
1211 * @cmds_max: max commands supported
1212 * @qdepth: scsi queue depth to support
1213 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1214 *
1215 * Creates a new iSCSI session instance on given device.
1216 */
1217static struct iscsi_cls_session *
1218bnx2i_session_create(struct iscsi_endpoint *ep,
1219 uint16_t cmds_max, uint16_t qdepth,
1220 uint32_t initial_cmdsn)
1221{
1222 struct Scsi_Host *shost;
1223 struct iscsi_cls_session *cls_session;
1224 struct bnx2i_hba *hba;
1225 struct bnx2i_endpoint *bnx2i_ep;
1226
1227 if (!ep) {
1228 printk(KERN_ERR "bnx2i: missing ep.\n");
1229 return NULL;
1230 }
1231
1232 bnx2i_ep = ep->dd_data;
1233 shost = bnx2i_ep->hba->shost;
1234 hba = iscsi_host_priv(shost);
1235 if (bnx2i_adapter_ready(hba))
1236 return NULL;
1237
1238 /*
1239 * user can override hw limit as long as it is within
1240 * the min/max.
1241 */
1242 if (cmds_max > hba->max_sqes)
1243 cmds_max = hba->max_sqes;
1244 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1245 cmds_max = BNX2I_SQ_WQES_MIN;
1246
1247 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1248 cmds_max, sizeof(struct bnx2i_cmd),
1249 initial_cmdsn, ISCSI_MAX_TARGET);
1250 if (!cls_session)
1251 return NULL;
1252
1253 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1254 goto session_teardown;
1255 return cls_session;
1256
1257session_teardown:
1258 iscsi_session_teardown(cls_session);
1259 return NULL;
1260}
1261
1262
1263/**
1264 * bnx2i_session_destroy - destroys iscsi session
1265 * @cls_session: pointer to iscsi cls session
1266 *
1267 * Destroys previously created iSCSI session instance and releases
1268 * all resources held by it
1269 */
1270static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1271{
1272 struct iscsi_session *session = cls_session->dd_data;
1273 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1274 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1275
1276 bnx2i_destroy_cmd_pool(hba, session);
1277 iscsi_session_teardown(cls_session);
1278}
1279
1280
1281/**
1282 * bnx2i_conn_create - create iscsi connection instance
1283 * @cls_session: pointer to iscsi cls session
1284 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1285 *
1286 * Creates a new iSCSI connection instance for a given session
1287 */
1288static struct iscsi_cls_conn *
1289bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1290{
1291 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1292 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1293 struct bnx2i_conn *bnx2i_conn;
1294 struct iscsi_cls_conn *cls_conn;
1295 struct iscsi_conn *conn;
1296
1297 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1298 cid);
1299 if (!cls_conn)
1300 return NULL;
1301 conn = cls_conn->dd_data;
1302
1303 bnx2i_conn = conn->dd_data;
1304 bnx2i_conn->cls_conn = cls_conn;
1305 bnx2i_conn->hba = hba;
1306 /* 'ep' ptr will be assigned in bind() call */
1307 bnx2i_conn->ep = NULL;
1308 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1309
1310 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1311 iscsi_conn_printk(KERN_ALERT, conn,
1312 "conn_new: login resc alloc failed!!\n");
1313 goto free_conn;
1314 }
1315
1316 return cls_conn;
1317
1318free_conn:
1319 iscsi_conn_teardown(cls_conn);
1320 return NULL;
1321}
1322
1323/**
1324 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1325 * @cls_session: pointer to iscsi cls session
1326 * @cls_conn: pointer to iscsi cls conn
1327 * @transport_fd: 64-bit EP handle
1328 * @is_leading: leading connection on this session?
1329 *
1330 * Binds together iSCSI session instance, iSCSI connection instance
1331 * and the TCP connection. This routine returns error code if
1332 * TCP connection does not belong on the device iSCSI sess/conn
1333 * is bound
1334 */
1335static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1336 struct iscsi_cls_conn *cls_conn,
1337 uint64_t transport_fd, int is_leading)
1338{
1339 struct iscsi_conn *conn = cls_conn->dd_data;
1340 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1341 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 struct bnx2i_endpoint *bnx2i_ep;
1344 struct iscsi_endpoint *ep;
1345 int ret_code;
1346
1347 ep = iscsi_lookup_endpoint(transport_fd);
1348 if (!ep)
1349 return -EINVAL;
1350
1351 bnx2i_ep = ep->dd_data;
1352 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1353 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1354 /* Peer disconnect via' FIN or RST */
1355 return -EINVAL;
1356
1357 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1358 return -EINVAL;
1359
1360 if (bnx2i_ep->hba != hba) {
1361 /* Error - TCP connection does not belong to this device
1362 */
1363 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1364 "conn bind, ep=0x%p (%s) does not",
1365 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1366 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1367 "belong to hba (%s)\n",
1368 hba->netdev->name);
1369 return -EEXIST;
1370 }
1371
1372 bnx2i_ep->conn = bnx2i_conn;
1373 bnx2i_conn->ep = bnx2i_ep;
1374 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1375 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1376 bnx2i_conn->is_bound = 1;
1377
1378 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1379 bnx2i_ep->ep_iscsi_cid);
1380
1381 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1382 * driver needs to explicitly replenish RQ index during setup.
1383 */
1384 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1385 bnx2i_put_rq_buf(bnx2i_conn, 0);
1386
1387 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1388 return ret_code;
1389}
1390
1391
1392/**
1393 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1394 * @cls_conn: pointer to iscsi cls conn
1395 *
1396 * Destroy an iSCSI connection instance and release memory resources held by
1397 * this connection
1398 */
1399static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1400{
1401 struct iscsi_conn *conn = cls_conn->dd_data;
1402 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1403 struct Scsi_Host *shost;
1404 struct bnx2i_hba *hba;
1405
1406 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1407 hba = iscsi_host_priv(shost);
1408
1409 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1410 iscsi_conn_teardown(cls_conn);
1411}
1412
1413
1414/**
1415 * bnx2i_conn_get_param - return iscsi connection parameter to caller
1416 * @cls_conn: pointer to iscsi cls conn
1417 * @param: parameter type identifier
1418 * @buf: buffer pointer
1419 *
1420 * returns iSCSI connection parameters
1421 */
1422static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1423 enum iscsi_param param, char *buf)
1424{
1425 struct iscsi_conn *conn = cls_conn->dd_data;
1426 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1427 int len = 0;
1428
1429 switch (param) {
1430 case ISCSI_PARAM_CONN_PORT:
1431 if (bnx2i_conn->ep)
1432 len = sprintf(buf, "%hu\n",
1433 bnx2i_conn->ep->cm_sk->dst_port);
1434 break;
1435 case ISCSI_PARAM_CONN_ADDRESS:
1436 if (bnx2i_conn->ep)
1437 len = sprintf(buf, NIPQUAD_FMT "\n",
1438 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
1439 break;
1440 default:
1441 return iscsi_conn_get_param(cls_conn, param, buf);
1442 }
1443
1444 return len;
1445}
1446
1447/**
1448 * bnx2i_host_get_param - returns host (adapter) related parameters
1449 * @shost: scsi host pointer
1450 * @param: parameter type identifier
1451 * @buf: buffer pointer
1452 */
1453static int bnx2i_host_get_param(struct Scsi_Host *shost,
1454 enum iscsi_host_param param, char *buf)
1455{
1456 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1457 int len = 0;
1458
1459 switch (param) {
1460 case ISCSI_HOST_PARAM_HWADDRESS:
1461 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1462 break;
1463 case ISCSI_HOST_PARAM_NETDEV_NAME:
1464 len = sprintf(buf, "%s\n", hba->netdev->name);
1465 break;
1466 default:
1467 return iscsi_host_get_param(shost, param, buf);
1468 }
1469 return len;
1470}
1471
1472/**
1473 * bnx2i_conn_start - completes iscsi connection migration to FFP
1474 * @cls_conn: pointer to iscsi cls conn
1475 *
1476 * last call in FFP migration to handover iscsi conn to the driver
1477 */
1478static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1479{
1480 struct iscsi_conn *conn = cls_conn->dd_data;
1481 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1482
1483 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1484 bnx2i_update_iscsi_conn(conn);
1485
1486 /*
1487 * this should normally not sleep for a long time so it should
1488 * not disrupt the caller.
1489 */
1490 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1491 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1492 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1493 add_timer(&bnx2i_conn->ep->ofld_timer);
1494 /* update iSCSI context for this conn, wait for CNIC to complete */
1495 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1496 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1497
1498 if (signal_pending(current))
1499 flush_signals(current);
1500 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1501
1502 iscsi_conn_start(cls_conn);
1503 return 0;
1504}
1505
1506
1507/**
1508 * bnx2i_conn_get_stats - returns iSCSI stats
1509 * @cls_conn: pointer to iscsi cls conn
1510 * @stats: pointer to iscsi statistic struct
1511 */
1512static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1513 struct iscsi_stats *stats)
1514{
1515 struct iscsi_conn *conn = cls_conn->dd_data;
1516
1517 stats->txdata_octets = conn->txdata_octets;
1518 stats->rxdata_octets = conn->rxdata_octets;
1519 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1520 stats->dataout_pdus = conn->dataout_pdus_cnt;
1521 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1522 stats->datain_pdus = conn->datain_pdus_cnt;
1523 stats->r2t_pdus = conn->r2t_pdus_cnt;
1524 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1525 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1526 stats->custom_length = 3;
1527 strcpy(stats->custom[2].desc, "eh_abort_cnt");
1528 stats->custom[2].value = conn->eh_abort_cnt;
1529 stats->digest_err = 0;
1530 stats->timeout_err = 0;
1531 stats->custom_length = 0;
1532}
1533
1534
1535/**
1536 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1537 * @dst_addr: target IP address
1538 *
1539 * check if route resolves to BNX2 device
1540 */
1541static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1542{
1543 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1544 struct bnx2i_hba *hba;
1545 struct cnic_dev *cnic = NULL;
1546
1547 bnx2i_reg_dev_all();
1548
1549 hba = get_adapter_list_head();
1550 if (hba && hba->cnic)
1551 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1552 if (!cnic) {
1553 printk(KERN_ALERT "bnx2i: no route,"
1554 "can't connect using cnic\n");
1555 goto no_nx2_route;
1556 }
1557 hba = bnx2i_find_hba_for_cnic(cnic);
1558 if (!hba)
1559 goto no_nx2_route;
1560
1561 if (bnx2i_adapter_ready(hba)) {
1562 printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1563 goto no_nx2_route;
1564 }
1565 if (hba->netdev->mtu > hba->mtu_supported) {
1566 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1567 hba->netdev->name, hba->netdev->mtu);
1568 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1569 hba->mtu_supported);
1570 goto no_nx2_route;
1571 }
1572 return hba;
1573no_nx2_route:
1574 return NULL;
1575}
1576
1577
1578/**
1579 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1580 * @hba: pointer to adapter instance
1581 * @ep: endpoint (transport indentifier) structure
1582 *
1583 * destroys cm_sock structure and on chip iscsi context
1584 */
1585static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1586 struct bnx2i_endpoint *ep)
1587{
1588 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1589 hba->cnic->cm_destroy(ep->cm_sk);
1590
1591 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1592 ep->state = EP_STATE_DISCONN_COMPL;
1593
1594 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1595 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1596 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
1597 " NW/PCIe trace, driver msgs to developers"
1598 " for analysis\n");
1599 return 1;
1600 }
1601
1602 ep->state = EP_STATE_CLEANUP_START;
1603 init_timer(&ep->ofld_timer);
1604 ep->ofld_timer.expires = 10*HZ + jiffies;
1605 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1606 ep->ofld_timer.data = (unsigned long) ep;
1607 add_timer(&ep->ofld_timer);
1608
1609 bnx2i_ep_destroy_list_add(hba, ep);
1610
1611 /* destroy iSCSI context, wait for it to complete */
1612 bnx2i_send_conn_destroy(hba, ep);
1613 wait_event_interruptible(ep->ofld_wait,
1614 (ep->state != EP_STATE_CLEANUP_START));
1615
1616 if (signal_pending(current))
1617 flush_signals(current);
1618 del_timer_sync(&ep->ofld_timer);
1619
1620 bnx2i_ep_destroy_list_del(hba, ep);
1621
1622 if (ep->state != EP_STATE_CLEANUP_CMPL)
1623 /* should never happen */
1624 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1625
1626 return 0;
1627}
1628
1629
/**
 * bnx2i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host; may be NULL, in which case a route lookup picks
 *	   an iSCSI capable NetXtreme2 device instead
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
 * with l5_core and the CNIC. This is a multi-step process of resolving
 * route to target, create a iscsi connection context, handshaking with
 * CNIC module to create/initialize the socket struct and finally
 * sending down option-2 request to complete TCP 3-way handshake
 *
 * Returns the new iscsi_endpoint on success, ERR_PTR(-errno) on failure.
 */
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
					       struct sockaddr *dst_addr,
					       int non_blocking)
{
	u32 iscsi_cid = BNX2I_CID_RESERVED;
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct sockaddr_in6 *desti6;
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic;
	struct cnic_sockaddr saddr;
	struct iscsi_endpoint *ep;
	int rc = 0;

	if (shost)
		/* driver is given scsi host to work with */
		hba = iscsi_host_priv(shost);
	else
		/*
		 * check if the given destination can be reached through
		 * a iscsi capable NetXtreme2 device
		 */
		hba = bnx2i_check_route(dst_addr);
	if (!hba) {
		rc = -ENOMEM;
		goto check_busy;
	}

	cnic = hba->cnic;
	ep = bnx2i_alloc_ep(hba);
	if (!ep) {
		rc = -ENOMEM;
		goto check_busy;
	}
	bnx2i_ep = ep->dd_data;

	/* hold net_dev_lock across the whole offload sequence so the hba
	 * cannot be torn down underneath us
	 */
	mutex_lock(&hba->net_dev_lock);
	if (bnx2i_adapter_ready(hba)) {
		rc = -EPERM;
		goto net_if_down;
	}

	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->ep_iscsi_cid = (u16) -1;	/* CID not assigned yet */
	bnx2i_ep->num_active_cmds = 0;
	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
	if (iscsi_cid == -1) {
		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
		rc = -ENOMEM;
		goto iscsi_cid_err;
	}
	bnx2i_ep->hba_age = hba->age;

	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
	if (rc != 0) {
		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
		rc = -ENOMEM;
		goto qp_resc_err;
	}

	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
	bnx2i_ep->state = EP_STATE_OFLD_START;
	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);

	/* 2-second watchdog bounds the interruptible wait below in case
	 * the chip never completes the offload request
	 */
	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);

	/* Wait for CNIC hardware to setup conn context and return 'cid' */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_OFLD_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
		rc = -ENOSPC;
		goto conn_failed;
	}

	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
	if (rc) {
		rc = -EINVAL;
		goto conn_failed;
	}

	/* 256KB socket buffers; TCP timestamps disabled on the offload sk */
	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

	memset(&saddr, 0, sizeof(saddr));
	if (dst_addr->sa_family == AF_INET) {
		desti = (struct sockaddr_in *) dst_addr;
		saddr.remote.v4 = *desti;
		saddr.local.v4.sin_family = desti->sin_family;
	} else if (dst_addr->sa_family == AF_INET6) {
		desti6 = (struct sockaddr_in6 *) dst_addr;
		saddr.remote.v6 = *desti6;
		saddr.local.v6.sin6_family = desti6->sin6_family;
	}

	bnx2i_ep->timestamp = jiffies;
	bnx2i_ep->state = EP_STATE_CONNECT_START;
	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = -EINVAL;
		goto conn_failed;
	} else
		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);

	if (rc)
		goto release_ep;

	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
		goto release_ep;
	mutex_unlock(&hba->net_dev_lock);
	return ep;

release_ep:
	/* connection context is owned by the chip; must be torn down first */
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return ERR_PTR(rc);
	}
conn_failed:
net_if_down:
iscsi_cid_err:
	bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
	bnx2i_free_ep(ep);
	mutex_unlock(&hba->net_dev_lock);
check_busy:
	bnx2i_unreg_dev_all();
	return ERR_PTR(rc);
}
1783
1784
1785/**
1786 * bnx2i_ep_poll - polls for TCP connection establishement
1787 * @ep: TCP connection (endpoint) handle
1788 * @timeout_ms: timeout value in milli secs
1789 *
1790 * polls for TCP connect request to complete
1791 */
1792static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1793{
1794 struct bnx2i_endpoint *bnx2i_ep;
1795 int rc = 0;
1796
1797 bnx2i_ep = ep->dd_data;
1798 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1799 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1800 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1801 return -1;
1802 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1803 return 1;
1804
1805 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1806 ((bnx2i_ep->state ==
1807 EP_STATE_OFLD_FAILED) ||
1808 (bnx2i_ep->state ==
1809 EP_STATE_CONNECT_FAILED) ||
1810 (bnx2i_ep->state ==
1811 EP_STATE_CONNECT_COMPL)),
1812 msecs_to_jiffies(timeout_ms));
1813 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1814 rc = -1;
1815
1816 if (rc > 0)
1817 return 1;
1818 else if (!rc)
1819 return 0; /* timeout */
1820 else
1821 return rc;
1822}
1823
1824
1825/**
1826 * bnx2i_ep_tcp_conn_active - check EP state transition
1827 * @ep: endpoint pointer
1828 *
1829 * check if underlying TCP connection is active
1830 */
1831static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1832{
1833 int ret;
1834 int cnic_dev_10g = 0;
1835
1836 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1837 cnic_dev_10g = 1;
1838
1839 switch (bnx2i_ep->state) {
1840 case EP_STATE_CONNECT_START:
1841 case EP_STATE_CLEANUP_FAILED:
1842 case EP_STATE_OFLD_FAILED:
1843 case EP_STATE_DISCONN_TIMEDOUT:
1844 ret = 0;
1845 break;
1846 case EP_STATE_CONNECT_COMPL:
1847 case EP_STATE_ULP_UPDATE_START:
1848 case EP_STATE_ULP_UPDATE_COMPL:
1849 case EP_STATE_TCP_FIN_RCVD:
1850 case EP_STATE_ULP_UPDATE_FAILED:
1851 ret = 1;
1852 break;
1853 case EP_STATE_TCP_RST_RCVD:
1854 ret = 0;
1855 break;
1856 case EP_STATE_CONNECT_FAILED:
1857 if (cnic_dev_10g)
1858 ret = 1;
1859 else
1860 ret = 0;
1861 break;
1862 default:
1863 ret = 0;
1864 }
1865
1866 return ret;
1867}
1868
1869
1870/**
1871 * bnx2i_ep_disconnect - executes TCP connection teardown process
1872 * @ep: TCP connection (endpoint) handle
1873 *
1874 * executes TCP connection teardown process
1875 */
1876static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1877{
1878 struct bnx2i_endpoint *bnx2i_ep;
1879 struct bnx2i_conn *bnx2i_conn = NULL;
1880 struct iscsi_session *session = NULL;
1881 struct iscsi_conn *conn;
1882 struct cnic_dev *cnic;
1883 struct bnx2i_hba *hba;
1884
1885 bnx2i_ep = ep->dd_data;
1886
1887 /* driver should not attempt connection cleanup untill TCP_CONNECT
1888 * completes either successfully or fails. Timeout is 9-secs, so
1889 * wait for it to complete
1890 */
1891 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
1892 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
1893 msleep(250);
1894
1895 if (bnx2i_ep->conn) {
1896 bnx2i_conn = bnx2i_ep->conn;
1897 conn = bnx2i_conn->cls_conn->dd_data;
1898 session = conn->session;
1899
1900 spin_lock_bh(&session->lock);
1901 bnx2i_conn->is_bound = 0;
1902 spin_unlock_bh(&session->lock);
1903 }
1904
1905 hba = bnx2i_ep->hba;
1906 if (bnx2i_ep->state == EP_STATE_IDLE)
1907 goto return_bnx2i_ep;
1908 cnic = hba->cnic;
1909
1910 mutex_lock(&hba->net_dev_lock);
1911
1912 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1913 goto free_resc;
1914 if (bnx2i_ep->hba_age != hba->age)
1915 goto free_resc;
1916
1917 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1918 goto destory_conn;
1919
1920 bnx2i_ep->state = EP_STATE_DISCONN_START;
1921
1922 init_timer(&bnx2i_ep->ofld_timer);
1923 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1924 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1925 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1926 add_timer(&bnx2i_ep->ofld_timer);
1927
1928 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1929 int close = 0;
1930
1931 if (session) {
1932 spin_lock_bh(&session->lock);
1933 if (session->state == ISCSI_STATE_LOGGING_OUT)
1934 close = 1;
1935 spin_unlock_bh(&session->lock);
1936 }
1937 if (close)
1938 cnic->cm_close(bnx2i_ep->cm_sk);
1939 else
1940 cnic->cm_abort(bnx2i_ep->cm_sk);
1941 } else
1942 goto free_resc;
1943
1944 /* wait for option-2 conn teardown */
1945 wait_event_interruptible(bnx2i_ep->ofld_wait,
1946 bnx2i_ep->state != EP_STATE_DISCONN_START);
1947
1948 if (signal_pending(current))
1949 flush_signals(current);
1950 del_timer_sync(&bnx2i_ep->ofld_timer);
1951
1952destory_conn:
1953 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1954 mutex_unlock(&hba->net_dev_lock);
1955 return;
1956 }
1957free_resc:
1958 mutex_unlock(&hba->net_dev_lock);
1959 bnx2i_free_qp_resc(hba, bnx2i_ep);
1960return_bnx2i_ep:
1961 if (bnx2i_conn)
1962 bnx2i_conn->ep = NULL;
1963
1964 bnx2i_free_ep(ep);
1965
1966 if (!hba->ofld_conns_active)
1967 bnx2i_unreg_dev_all();
1968}
1969
1970
/**
 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
 * @shost: scsi host owning the bnx2i adapter
 * @params: iscsi path message carried in the netlink event
 *
 * Forwards the path-update payload to the cnic driver; bnx2i itself
 * does not interpret the message. Always returns 0 — the cnic
 * handler's result is intentionally not propagated.
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	char *buf = (char *) params;
	u16 len = sizeof(*params);

	/* handled by cnic driver */
	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
				     len);

	return 0;
}
1988
1989
/*
 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
 * used while registering with the scsi host and iSCSI transport module.
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "Broadcom Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.can_queue		= 1024,
	.max_sectors		= 127,
	.cmd_per_lun		= 32,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	/* sysfs attributes (sq_size/num_ccell) from bnx2i_sysfs.c */
	.shost_attrs		= bnx2i_dev_attributes,
};
2010
/* iSCSI transport interface registered with the open-iscsi transport class */
struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD,
	/* iSCSI parameters this transport allows userspace to get/set */
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
	/* session/connection lifecycle handlers */
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= bnx2i_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect - disconnect - option-2 interface calls */
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 000000000000..96426b751eb2
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11
12#include "bnx2i.h"
13
/**
 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
 * @dev: device pointer
 *
 * Map device to hba structure
 */
static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
{
	/* the hba lives in the iscsi host's private data */
	return iscsi_host_priv(class_to_shost(dev));
}
25
26
/**
 * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
 * @dev: device pointer
 * @attr: device attribute (sq_size)
 * @buf: buffer to return current SQ size parameter
 *
 * Returns current SQ size parameter, this parameter determines the number
 * of outstanding iSCSI commands supported on a connection
 */
static ssize_t bnx2i_show_sq_info(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);

	return sprintf(buf, "0x%x\n", hba->max_sqes);
}
42
43
44/**
45 * bnx2i_set_sq_info - update send queue (SQ) size parameter
46 * @dev: device pointer
47 * @buf: buffer to return current SQ size parameter
48 * @count: parameter buffer size
49 *
50 * Interface for user to change shared queue size allocated for each conn
51 * Must be within SQ limits and a power of 2. For the latter this is needed
52 * because of how libiscsi preallocates tasks.
53 */
54static ssize_t bnx2i_set_sq_info(struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{
58 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
59 u32 val;
60 int max_sq_size;
61
62 if (hba->ofld_conns_active)
63 goto skip_config;
64
65 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
66 max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
67 else
68 max_sq_size = BNX2I_570X_SQ_WQES_MAX;
69
70 if (sscanf(buf, " 0x%x ", &val) > 0) {
71 if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
72 (is_power_of_2(val)))
73 hba->max_sqes = val;
74 }
75
76 return count;
77
78skip_config:
79 printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
80 return 0;
81}
82
83
84/**
85 * bnx2i_show_ccell_info - returns command cell (HQ) size
86 * @dev: device pointer
87 * @buf: buffer to return current SQ size parameter
88 *
89 * returns per-connection TCP history queue size parameter
90 */
91static ssize_t bnx2i_show_ccell_info(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
95
96 return sprintf(buf, "0x%x\n", hba->num_ccell);
97}
98
99
100/**
101 * bnx2i_get_link_state - set command cell (HQ) size
102 * @dev: device pointer
103 * @buf: buffer to return current SQ size parameter
104 * @count: parameter buffer size
105 *
106 * updates per-connection TCP history queue size parameter
107 */
108static ssize_t bnx2i_set_ccell_info(struct device *dev,
109 struct device_attribute *attr,
110 const char *buf, size_t count)
111{
112 u32 val;
113 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
114
115 if (hba->ofld_conns_active)
116 goto skip_config;
117
118 if (sscanf(buf, " 0x%x ", &val) > 0) {
119 if ((val >= BNX2I_CCELLS_MIN) &&
120 (val <= BNX2I_CCELLS_MAX)) {
121 hba->num_ccell = val;
122 }
123 }
124
125 return count;
126
127skip_config:
128 printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
129 return 0;
130}
131
132
/* sq_size and num_ccell: root-writable, world-readable tuning knobs */
static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
		   bnx2i_show_sq_info, bnx2i_set_sq_info);
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);

/* NULL-terminated list wired into bnx2i_host_template.shost_attrs */
struct device_attribute *bnx2i_dev_attributes[] = {
	&dev_attr_sq_size,
	&dev_attr_num_ccell,
	NULL
};
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d11..e3133b58e594 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *); 144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *); 145void cxgb3i_adapter_close(struct t3cdev *);
146 146
147struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
148struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, 147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
149 struct net_device *); 148 struct net_device *);
150void cxgb3i_hba_host_remove(struct cxgb3i_hba *); 149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b13..74369a3f963b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <net/dst.h>
16#include <net/tcp.h> 17#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h> 19#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
178 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device 179 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
179 * @t3dev: t3cdev adapter 180 * @t3dev: t3cdev adapter
180 */ 181 */
181struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 182static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
182{ 183{
183 struct cxgb3i_adapter *snic; 184 struct cxgb3i_adapter *snic;
184 int i; 185 int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
261 262
262/** 263/**
263 * cxgb3i_ep_connect - establish TCP connection to target portal 264 * cxgb3i_ep_connect - establish TCP connection to target portal
265 * @shost: scsi host to use
264 * @dst_addr: target IP address 266 * @dst_addr: target IP address
265 * @non_blocking: blocking or non-blocking call 267 * @non_blocking: blocking or non-blocking call
266 * 268 *
267 * Initiates a TCP/IP connection to the dst_addr 269 * Initiates a TCP/IP connection to the dst_addr
268 */ 270 */
269static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, 271static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
272 struct sockaddr *dst_addr,
270 int non_blocking) 273 int non_blocking)
271{ 274{
272 struct iscsi_endpoint *ep; 275 struct iscsi_endpoint *ep;
273 struct cxgb3i_endpoint *cep; 276 struct cxgb3i_endpoint *cep;
274 struct cxgb3i_hba *hba; 277 struct cxgb3i_hba *hba = NULL;
275 struct s3_conn *c3cn = NULL; 278 struct s3_conn *c3cn = NULL;
276 int err = 0; 279 int err = 0;
277 280
281 if (shost)
282 hba = iscsi_host_priv(shost);
283
284 cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
285
278 c3cn = cxgb3i_c3cn_create(); 286 c3cn = cxgb3i_c3cn_create();
279 if (!c3cn) { 287 if (!c3cn) {
280 cxgb3i_log_info("ep connect OOM.\n"); 288 cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
282 goto release_conn; 290 goto release_conn;
283 } 291 }
284 292
285 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); 293 err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
294 (struct sockaddr_in *)dst_addr);
286 if (err < 0) { 295 if (err < 0) {
287 cxgb3i_log_info("ep connect failed.\n"); 296 cxgb3i_log_info("ep connect failed.\n");
288 goto release_conn; 297 goto release_conn;
289 } 298 }
299
290 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); 300 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
291 if (!hba) { 301 if (!hba) {
292 err = -ENOSPC; 302 err = -ENOSPC;
293 cxgb3i_log_info("NOT going through cxgbi device.\n"); 303 cxgb3i_log_info("NOT going through cxgbi device.\n");
294 goto release_conn; 304 goto release_conn;
295 } 305 }
306
307 if (shost && hba != iscsi_host_priv(shost)) {
308 err = -ENOSPC;
309 cxgb3i_log_info("Could not connect through request host%u\n",
310 shost->host_no);
311 goto release_conn;
312 }
313
296 if (c3cn_is_closing(c3cn)) { 314 if (c3cn_is_closing(c3cn)) {
297 err = -ENOSPC; 315 err = -ENOSPC;
298 cxgb3i_log_info("ep connect unable to connect.\n"); 316 cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f39..c1d5be4adf9c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1479 return NULL; 1479 return NULL;
1480} 1480}
1481 1481
1482static struct rtable *find_route(__be32 saddr, __be32 daddr, 1482static struct rtable *find_route(struct net_device *dev,
1483 __be32 saddr, __be32 daddr,
1483 __be16 sport, __be16 dport) 1484 __be16 sport, __be16 dport)
1484{ 1485{
1485 struct rtable *rt; 1486 struct rtable *rt;
1486 struct flowi fl = { 1487 struct flowi fl = {
1487 .oif = 0, 1488 .oif = dev ? dev->ifindex : 0,
1488 .nl_u = { 1489 .nl_u = {
1489 .ip4_u = { 1490 .ip4_u = {
1490 .daddr = daddr, 1491 .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
1573 * 1574 *
1574 * return 0 if active open request is sent, < 0 otherwise. 1575 * return 0 if active open request is sent, < 0 otherwise.
1575 */ 1576 */
1576int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) 1577int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1578 struct sockaddr_in *usin)
1577{ 1579{
1578 struct rtable *rt; 1580 struct rtable *rt;
1579 struct net_device *dev;
1580 struct cxgb3i_sdev_data *cdata; 1581 struct cxgb3i_sdev_data *cdata;
1581 struct t3cdev *cdev; 1582 struct t3cdev *cdev;
1582 __be32 sipv4; 1583 __be32 sipv4;
1583 int err; 1584 int err;
1584 1585
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1587
1585 if (usin->sin_family != AF_INET) 1588 if (usin->sin_family != AF_INET)
1586 return -EAFNOSUPPORT; 1589 return -EAFNOSUPPORT;
1587 1590
1588 c3cn->daddr.sin_port = usin->sin_port; 1591 c3cn->daddr.sin_port = usin->sin_port;
1589 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1590 1593
1591 rt = find_route(c3cn->saddr.sin_addr.s_addr, 1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1592 c3cn->daddr.sin_addr.s_addr, 1595 c3cn->daddr.sin_addr.s_addr,
1593 c3cn->saddr.sin_port, 1596 c3cn->saddr.sin_port,
1594 c3cn->daddr.sin_port); 1597 c3cn->daddr.sin_port);
1595 if (rt == NULL) { 1598 if (rt == NULL) {
1596 c3cn_conn_debug("NO route to 0x%x, port %u.\n", 1599 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1597 c3cn->daddr.sin_addr.s_addr, 1600 c3cn->daddr.sin_addr.s_addr,
1598 ntohs(c3cn->daddr.sin_port)); 1601 ntohs(c3cn->daddr.sin_port),
1602 dev ? dev->name : "any");
1599 return -ENETUNREACH; 1603 return -ENETUNREACH;
1600 } 1604 }
1601 1605
1602 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 1606 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1603 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", 1607 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1604 c3cn->daddr.sin_addr.s_addr, 1608 c3cn->daddr.sin_addr.s_addr,
1605 ntohs(c3cn->daddr.sin_port)); 1609 ntohs(c3cn->daddr.sin_port),
1610 dev ? dev->name : "any");
1606 ip_rt_put(rt); 1611 ip_rt_put(rt);
1607 return -ENETUNREACH; 1612 return -ENETUNREACH;
1608 } 1613 }
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a9..6a1d86b1fafe 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *); 169void cxgb3i_sdev_remove(struct t3cdev *);
170 170
171struct s3_conn *cxgb3i_c3cn_create(void); 171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); 172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
173void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); 174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
174int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); 175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
175void cxgb3i_c3cn_release(struct s3_conn *); 176void cxgb3i_c3cn_release(struct s3_conn *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d0..fd0544f7da81 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
561 struct rdac_dh_data *h = get_rdac_data(sdev); 561 struct rdac_dh_data *h = get_rdac_data(sdev);
562 switch (sense_hdr->sense_key) { 562 switch (sense_hdr->sense_key) {
563 case NOT_READY: 563 case NOT_READY:
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
565 /* LUN Not Ready - Logical Unit Not Ready and is in
566 * the process of becoming ready
567 * Just retry.
568 */
569 return ADD_TO_MLQUEUE;
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) 570 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
565 /* LUN Not Ready - Storage firmware incompatible 571 /* LUN Not Ready - Storage firmware incompatible
566 * Manual code synchonisation required. 572 * Manual code synchonisation required.
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 03e1926f40b5..e606b4829d44 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
54/* fcoe host list */ 54/* fcoe host list */
55LIST_HEAD(fcoe_hostlist); 55LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock); 56DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 57DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59 58
60/* Function Prototypes */ 59/* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *); 70static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *); 71static int fcoe_hostlist_remove(const struct fc_lport *);
73 72
74static int fcoe_check_wait_queue(struct fc_lport *); 73static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *); 74static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void); 75static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void); 76static void fcoe_dev_cleanup(void);
@@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
146 lp->link_up = 0; 145 lp->link_up = 0;
147 lp->qfull = 0; 146 lp->qfull = 0;
148 lp->max_retry_count = 3; 147 lp->max_retry_count = 3;
148 lp->max_rport_retry_count = 3;
149 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 149 lp->e_d_tov = 2 * 1000; /* FC-FS default */
150 lp->r_a_tov = 2 * 2 * 1000; 150 lp->r_a_tov = 2 * 2 * 1000;
151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -167,6 +167,18 @@ static int fcoe_lport_config(struct fc_lport *lp)
167} 167}
168 168
169/** 169/**
170 * fcoe_queue_timer() - fcoe queue timer
171 * @lp: the fc_lport pointer
172 *
173 * Calls fcoe_check_wait_queue on timeout
174 *
175 */
176static void fcoe_queue_timer(ulong lp)
177{
178 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
179}
180
181/**
170 * fcoe_netdev_config() - Set up netdev for SW FCoE 182 * fcoe_netdev_config() - Set up netdev for SW FCoE
171 * @lp : ptr to the fc_lport 183 * @lp : ptr to the fc_lport
172 * @netdev : ptr to the associated netdevice struct 184 * @netdev : ptr to the associated netdevice struct
@@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
236 } 248 }
237 skb_queue_head_init(&fc->fcoe_pending_queue); 249 skb_queue_head_init(&fc->fcoe_pending_queue);
238 fc->fcoe_pending_queue_active = 0; 250 fc->fcoe_pending_queue_active = 0;
251 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
239 252
240 /* setup Source Mac Address */ 253 /* setup Source Mac Address */
241 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, 254 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
@@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
386 /* Free existing skbs */ 399 /* Free existing skbs */
387 fcoe_clean_pending_queue(lp); 400 fcoe_clean_pending_queue(lp);
388 401
402 /* Stop the timer */
403 del_timer_sync(&fc->timer);
404
389 /* Free memory used by statistical counters */ 405 /* Free memory used by statistical counters */
390 fc_lport_free_stats(lp); 406 fc_lport_free_stats(lp);
391 407
@@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
988 */ 1004 */
989int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1005int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
990{ 1006{
991 int wlen, rc = 0; 1007 int wlen;
992 u32 crc; 1008 u32 crc;
993 struct ethhdr *eh; 1009 struct ethhdr *eh;
994 struct fcoe_crc_eof *cp; 1010 struct fcoe_crc_eof *cp;
@@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1021 sof = fr_sof(fp); 1037 sof = fr_sof(fp);
1022 eof = fr_eof(fp); 1038 eof = fr_eof(fp);
1023 1039
1024 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 1040 elen = sizeof(struct ethhdr);
1025 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1026 hlen = sizeof(struct fcoe_hdr); 1041 hlen = sizeof(struct fcoe_hdr);
1027 tlen = sizeof(struct fcoe_crc_eof); 1042 tlen = sizeof(struct fcoe_crc_eof);
1028 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1043 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1107 /* send down to lld */ 1122 /* send down to lld */
1108 fr_dev(fp) = lp; 1123 fr_dev(fp) = lp;
1109 if (fc->fcoe_pending_queue.qlen) 1124 if (fc->fcoe_pending_queue.qlen)
1110 rc = fcoe_check_wait_queue(lp); 1125 fcoe_check_wait_queue(lp, skb);
1111 1126 else if (fcoe_start_io(skb))
1112 if (rc == 0) 1127 fcoe_check_wait_queue(lp, skb);
1113 rc = fcoe_start_io(skb);
1114
1115 if (rc) {
1116 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1117 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1118 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1119 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1120 lp->qfull = 1;
1121 }
1122 1128
1123 return 0; 1129 return 0;
1124} 1130}
@@ -1268,32 +1274,6 @@ int fcoe_percpu_receive_thread(void *arg)
1268} 1274}
1269 1275
1270/** 1276/**
1271 * fcoe_watchdog() - fcoe timer callback
1272 * @vp:
1273 *
1274 * This checks the pending queue length for fcoe and set lport qfull
1275 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1276 * fcoe_hostlist.
1277 *
1278 * Returns: 0 for success
1279 */
1280void fcoe_watchdog(ulong vp)
1281{
1282 struct fcoe_softc *fc;
1283
1284 read_lock(&fcoe_hostlist_lock);
1285 list_for_each_entry(fc, &fcoe_hostlist, list) {
1286 if (fc->ctlr.lp)
1287 fcoe_check_wait_queue(fc->ctlr.lp);
1288 }
1289 read_unlock(&fcoe_hostlist_lock);
1290
1291 fcoe_timer.expires = jiffies + (1 * HZ);
1292 add_timer(&fcoe_timer);
1293}
1294
1295
1296/**
1297 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1277 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1298 * @lp: the fc_lport 1278 * @lp: the fc_lport
1299 * 1279 *
@@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp)
1305 * The wait_queue is used when the skb transmit fails. skb will go 1285 * The wait_queue is used when the skb transmit fails. skb will go
1306 * in the wait_queue which will be emptied by the timer function or 1286 * in the wait_queue which will be emptied by the timer function or
1307 * by the next skb transmit. 1287 * by the next skb transmit.
1308 *
1309 * Returns: 0 for success
1310 */ 1288 */
1311static int fcoe_check_wait_queue(struct fc_lport *lp) 1289static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1312{ 1290{
1313 struct fcoe_softc *fc = lport_priv(lp); 1291 struct fcoe_softc *fc = lport_priv(lp);
1314 struct sk_buff *skb; 1292 int rc;
1315 int rc = -1;
1316 1293
1317 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1294 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1295
1296 if (skb)
1297 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1298
1318 if (fc->fcoe_pending_queue_active) 1299 if (fc->fcoe_pending_queue_active)
1319 goto out; 1300 goto out;
1320 fc->fcoe_pending_queue_active = 1; 1301 fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
1340 1321
1341 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1322 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1342 lp->qfull = 0; 1323 lp->qfull = 0;
1324 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1325 mod_timer(&fc->timer, jiffies + 2);
1343 fc->fcoe_pending_queue_active = 0; 1326 fc->fcoe_pending_queue_active = 0;
1344 rc = fc->fcoe_pending_queue.qlen;
1345out: 1327out:
1328 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1329 lp->qfull = 1;
1346 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1330 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1347 return rc; 1331 return;
1348} 1332}
1349 1333
1350/** 1334/**
1351 * fcoe_dev_setup() - setup link change notification interface 1335 * fcoe_dev_setup() - setup link change notification interface
1352 */ 1336 */
1353static void fcoe_dev_setup() 1337static void fcoe_dev_setup(void)
1354{ 1338{
1355 register_netdevice_notifier(&fcoe_notifier); 1339 register_netdevice_notifier(&fcoe_notifier);
1356} 1340}
1357 1341
1358/** 1342/**
1359 * fcoe_dev_setup() - cleanup link change notification interface 1343 * fcoe_dev_cleanup() - cleanup link change notification interface
1360 */ 1344 */
1361static void fcoe_dev_cleanup(void) 1345static void fcoe_dev_cleanup(void)
1362{ 1346{
@@ -1815,10 +1799,6 @@ static int __init fcoe_init(void)
1815 /* Setup link change notification */ 1799 /* Setup link change notification */
1816 fcoe_dev_setup(); 1800 fcoe_dev_setup();
1817 1801
1818 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1819
1820 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1821
1822 fcoe_if_init(); 1802 fcoe_if_init();
1823 1803
1824 return 0; 1804 return 0;
@@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void)
1844 1824
1845 fcoe_dev_cleanup(); 1825 fcoe_dev_cleanup();
1846 1826
1847 /* Stop the timer */
1848 del_timer_sync(&fcoe_timer);
1849
1850 /* releases the associated fcoe hosts */ 1827 /* releases the associated fcoe hosts */
1851 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1828 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1852 fcoe_if_destroy(fc->real_dev); 1829 fcoe_if_destroy(fc->real_dev);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae886897..a1eb8c1988b0 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -61,6 +61,7 @@ struct fcoe_softc {
61 struct packet_type fip_packet_type; 61 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue; 62 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active; 63 u8 fcoe_pending_queue_active;
64 struct timer_list timer; /* queue timer */
64 struct fcoe_ctlr ctlr; 65 struct fcoe_ctlr ctlr;
65}; 66};
66 67
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 62ba0f39c6bd..929411880e4b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
213 sol->desc.size.fd_size = htons(fcoe_size); 213 sol->desc.size.fd_size = htons(fcoe_size);
214 214
215 skb_put(skb, sizeof(*sol)); 215 skb_put(skb, sizeof(*sol));
216 skb->protocol = htons(ETH_P_802_3); 216 skb->protocol = htons(ETH_P_FIP);
217 skb_reset_mac_header(skb); 217 skb_reset_mac_header(skb);
218 skb_reset_network_header(skb); 218 skb_reset_network_header(skb);
219 fip->send(fip, skb); 219 fip->send(fip, skb);
@@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
365 } 365 }
366 366
367 skb_put(skb, len); 367 skb_put(skb, len);
368 skb->protocol = htons(ETH_P_802_3); 368 skb->protocol = htons(ETH_P_FIP);
369 skb_reset_mac_header(skb); 369 skb_reset_mac_header(skb);
370 skb_reset_network_header(skb); 370 skb_reset_network_header(skb);
371 fip->send(fip, skb); 371 fip->send(fip, skb);
@@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
424 if (dtype != ELS_FLOGI) 424 if (dtype != ELS_FLOGI)
425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); 425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
426 426
427 skb->protocol = htons(ETH_P_802_3); 427 skb->protocol = htons(ETH_P_FIP);
428 skb_reset_mac_header(skb); 428 skb_reset_mac_header(skb);
429 skb_reset_network_header(skb); 429 skb_reset_network_header(skb);
430 return 0; 430 return 0;
@@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
447 u16 old_xid; 447 u16 old_xid;
448 u8 op; 448 u8 op;
449 449
450 if (fip->state == FIP_ST_NON_FIP)
451 return 0;
452
453 fh = (struct fc_frame_header *)skb->data; 450 fh = (struct fc_frame_header *)skb->data;
454 op = *(u8 *)(fh + 1); 451 op = *(u8 *)(fh + 1);
455 452
456 switch (op) { 453 if (op == ELS_FLOGI) {
457 case ELS_FLOGI:
458 old_xid = fip->flogi_oxid; 454 old_xid = fip->flogi_oxid;
459 fip->flogi_oxid = ntohs(fh->fh_ox_id); 455 fip->flogi_oxid = ntohs(fh->fh_ox_id);
460 if (fip->state == FIP_ST_AUTO) { 456 if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
466 fip->map_dest = 1; 462 fip->map_dest = 1;
467 return 0; 463 return 0;
468 } 464 }
465 if (fip->state == FIP_ST_NON_FIP)
466 fip->map_dest = 1;
467 }
468
469 if (fip->state == FIP_ST_NON_FIP)
470 return 0;
471
472 switch (op) {
473 case ELS_FLOGI:
469 op = FIP_DT_FLOGI; 474 op = FIP_DT_FLOGI;
470 break; 475 break;
471 case ELS_FDISC: 476 case ELS_FDISC:
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 32ef6b87d895..a84072865fc2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
680 } 680 }
681 681
682 lp->max_retry_count = fnic->config.flogi_retries; 682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries;
683 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
684 FCP_SPPF_CONF_COMPL); 685 FCP_SPPF_CONF_COMPL);
685 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) 686 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e13..1258da34fbc2 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
152 struct Scsi_Host *host, gdth_ha_str *ha) 152 struct Scsi_Host *host, gdth_ha_str *ha)
153{ 153{
154 int size = 0,len = 0; 154 int size = 0,len = 0;
155 int hlen;
155 off_t begin = 0,pos = 0; 156 off_t begin = 0,pos = 0;
156 int id, i, j, k, sec, flag; 157 int id, i, j, k, sec, flag;
157 int no_mdrv = 0, drv_no, is_mirr; 158 int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
192 if (reserve_list[0] == 0xff) 193 if (reserve_list[0] == 0xff)
193 strcpy(hrec, "--"); 194 strcpy(hrec, "--");
194 else { 195 else {
195 sprintf(hrec, "%d", reserve_list[0]); 196 hlen = sprintf(hrec, "%d", reserve_list[0]);
196 for (i = 1; i < MAX_RES_ARGS; i++) { 197 for (i = 1; i < MAX_RES_ARGS; i++) {
197 if (reserve_list[i] == 0xff) 198 if (reserve_list[i] == 0xff)
198 break; 199 break;
199 sprintf(hrec,"%s,%d", hrec, reserve_list[i]); 200 hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
200 } 201 }
201 } 202 }
202 size = sprintf(buffer+len, 203 size = sprintf(buffer+len,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a9..b4b805e8d7db 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, 110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, 111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, 112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, 113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, 114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); 143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); 144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
145static void ibmvfc_tgt_query_target(struct ibmvfc_target *); 145static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
146static void ibmvfc_npiv_logout(struct ibmvfc_host *);
146 147
147static const char *unknown_error = "unknown error"; 148static const char *unknown_error = "unknown error";
148 149
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
275 int fc_rsp_len = rsp->fcp_rsp_len; 276 int fc_rsp_len = rsp->fcp_rsp_len;
276 277
277 if ((rsp->flags & FCP_RSP_LEN_VALID) && 278 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
278 ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || 279 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
279 rsp->data.info.rsp_code)) 280 rsp->data.info.rsp_code))
280 return DID_ERROR << 16; 281 return DID_ERROR << 16;
281 282
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
431 case IBMVFC_TGT_ACTION_DEL_RPORT: 432 case IBMVFC_TGT_ACTION_DEL_RPORT:
432 break; 433 break;
433 default: 434 default:
435 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
436 tgt->add_rport = 0;
434 tgt->action = action; 437 tgt->action = action;
435 break; 438 break;
436 } 439 }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
475 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) 478 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
476 vhost->action = action; 479 vhost->action = action;
477 break; 480 break;
481 case IBMVFC_HOST_ACTION_LOGO_WAIT:
482 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
483 vhost->action = action;
484 break;
478 case IBMVFC_HOST_ACTION_INIT_WAIT: 485 case IBMVFC_HOST_ACTION_INIT_WAIT:
479 if (vhost->action == IBMVFC_HOST_ACTION_INIT) 486 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
480 vhost->action = action; 487 vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
483 switch (vhost->action) { 490 switch (vhost->action) {
484 case IBMVFC_HOST_ACTION_INIT_WAIT: 491 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 case IBMVFC_HOST_ACTION_NONE: 492 case IBMVFC_HOST_ACTION_NONE:
486 case IBMVFC_HOST_ACTION_TGT_ADD: 493 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
487 vhost->action = action; 494 vhost->action = action;
488 break; 495 break;
489 default: 496 default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
494 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) 501 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
495 vhost->action = action; 502 vhost->action = action;
496 break; 503 break;
504 case IBMVFC_HOST_ACTION_LOGO:
497 case IBMVFC_HOST_ACTION_INIT: 505 case IBMVFC_HOST_ACTION_INIT:
498 case IBMVFC_HOST_ACTION_TGT_DEL: 506 case IBMVFC_HOST_ACTION_TGT_DEL:
499 case IBMVFC_HOST_ACTION_QUERY_TGTS: 507 case IBMVFC_HOST_ACTION_QUERY_TGTS:
500 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 508 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
501 case IBMVFC_HOST_ACTION_TGT_ADD:
502 case IBMVFC_HOST_ACTION_NONE: 509 case IBMVFC_HOST_ACTION_NONE:
503 default: 510 default:
504 vhost->action = action; 511 vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
576 } 583 }
577 584
578 list_for_each_entry(tgt, &vhost->targets, queue) 585 list_for_each_entry(tgt, &vhost->targets, queue)
579 tgt->need_login = 1; 586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
580 scsi_block_requests(vhost->host); 587 scsi_block_requests(vhost->host);
581 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 588 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
582 vhost->job_step = ibmvfc_npiv_login; 589 vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
646 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 653 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
647 654
648 vhost->state = IBMVFC_NO_CRQ; 655 vhost->state = IBMVFC_NO_CRQ;
656 vhost->logged_in = 0;
649 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 657 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
650 free_page((unsigned long)crq->msgs); 658 free_page((unsigned long)crq->msgs);
651} 659}
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 700 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
693 701
694 vhost->state = IBMVFC_NO_CRQ; 702 vhost->state = IBMVFC_NO_CRQ;
703 vhost->logged_in = 0;
695 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 704 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
696 705
697 /* Clean out the queue */ 706 /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
807} 816}
808 817
809/** 818/**
810 * __ibmvfc_reset_host - Reset the connection to the server (no locking) 819 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
811 * @vhost: struct ibmvfc host to reset 820 * @vhost: struct ibmvfc host to reset
812 **/ 821 **/
813static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) 822static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
814{ 823{
815 int rc; 824 int rc;
816 825
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
826} 835}
827 836
828/** 837/**
829 * ibmvfc_reset_host - Reset the connection to the server 838 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
830 * @vhost: struct ibmvfc host to reset 839 * @vhost: struct ibmvfc host to reset
831 **/ 840 **/
841static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
842{
843 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
844 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
845 scsi_block_requests(vhost->host);
846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
847 vhost->job_step = ibmvfc_npiv_logout;
848 wake_up(&vhost->work_wait_q);
849 } else
850 ibmvfc_hard_reset_host(vhost);
851}
852
853/**
854 * ibmvfc_reset_host - Reset the connection to the server
855 * @vhost: ibmvfc host struct
856 **/
832static void ibmvfc_reset_host(struct ibmvfc_host *vhost) 857static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
833{ 858{
834 unsigned long flags; 859 unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
842 * ibmvfc_retry_host_init - Retry host initialization if allowed 867 * ibmvfc_retry_host_init - Retry host initialization if allowed
843 * @vhost: ibmvfc host struct 868 * @vhost: ibmvfc host struct
844 * 869 *
870 * Returns: 1 if init will be retried / 0 if not
871 *
845 **/ 872 **/
846static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) 873static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
847{ 874{
875 int retry = 0;
876
848 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 877 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
849 vhost->delay_init = 1; 878 vhost->delay_init = 1;
850 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { 879 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
853 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 882 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
854 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) 883 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
855 __ibmvfc_reset_host(vhost); 884 __ibmvfc_reset_host(vhost);
856 else 885 else {
857 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
887 retry = 1;
888 }
858 } 889 }
859 890
860 wake_up(&vhost->work_wait_q); 891 wake_up(&vhost->work_wait_q);
892 return retry;
861} 893}
862 894
863/** 895/**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 login_info->partition_num = vhost->partition_number; 1169 login_info->partition_num = vhost->partition_number;
1138 login_info->vfc_frame_version = 1; 1170 login_info->vfc_frame_version = 1;
1139 login_info->fcp_version = 3; 1171 login_info->fcp_version = 3;
1172 login_info->flags = IBMVFC_FLUSH_ON_HALT;
1140 if (vhost->client_migrated) 1173 if (vhost->client_migrated)
1141 login_info->flags = IBMVFC_CLIENT_MIGRATED; 1174 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1142 1175
1143 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1176 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1144 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1177 login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452} 1485}
1453 1486
1454/** 1487/**
1488 * ibmvfc_relogin - Log back into the specified device
1489 * @sdev: scsi device struct
1490 *
1491 **/
1492static void ibmvfc_relogin(struct scsi_device *sdev)
1493{
1494 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1496 struct ibmvfc_target *tgt;
1497
1498 list_for_each_entry(tgt, &vhost->targets, queue) {
1499 if (rport == tgt->rport) {
1500 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1501 break;
1502 }
1503 }
1504
1505 ibmvfc_reinit_host(vhost);
1506}
1507
1508/**
1455 * ibmvfc_scsi_done - Handle responses from commands 1509 * ibmvfc_scsi_done - Handle responses from commands
1456 * @evt: ibmvfc event to be handled 1510 * @evt: ibmvfc event to be handled
1457 * 1511 *
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1537 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1484 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1538 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1485 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) 1539 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1486 ibmvfc_reinit_host(evt->vhost); 1540 ibmvfc_relogin(cmnd->device);
1487 1541
1488 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) 1542 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1489 cmnd->result = (DID_ERROR << 16); 1543 cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 struct ibmvfc_host *vhost) 2202 struct ibmvfc_host *vhost)
2149{ 2203{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2204 const char *desc = ibmvfc_get_ae_desc(crq->event);
2205 struct ibmvfc_target *tgt;
2151 2206
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2207 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2208 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2209
2155 switch (crq->event) { 2210 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP:
2157 case IBMVFC_AE_RESUME: 2211 case IBMVFC_AE_RESUME:
2212 switch (crq->link_state) {
2213 case IBMVFC_AE_LS_LINK_DOWN:
2214 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2215 break;
2216 case IBMVFC_AE_LS_LINK_DEAD:
2217 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2218 break;
2219 case IBMVFC_AE_LS_LINK_UP:
2220 case IBMVFC_AE_LS_LINK_BOUNCED:
2221 default:
2222 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2223 vhost->delay_init = 1;
2224 __ibmvfc_reset_host(vhost);
2225 break;
2226 };
2227
2228 break;
2229 case IBMVFC_AE_LINK_UP:
2158 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2230 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2159 vhost->delay_init = 1; 2231 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost); 2232 __ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 case IBMVFC_AE_SCN_NPORT: 2240 case IBMVFC_AE_SCN_NPORT:
2169 case IBMVFC_AE_SCN_GROUP: 2241 case IBMVFC_AE_SCN_GROUP:
2170 vhost->events_to_log |= IBMVFC_AE_RSCN; 2242 vhost->events_to_log |= IBMVFC_AE_RSCN;
2243 ibmvfc_reinit_host(vhost);
2244 break;
2171 case IBMVFC_AE_ELS_LOGO: 2245 case IBMVFC_AE_ELS_LOGO:
2172 case IBMVFC_AE_ELS_PRLO: 2246 case IBMVFC_AE_ELS_PRLO:
2173 case IBMVFC_AE_ELS_PLOGI: 2247 case IBMVFC_AE_ELS_PLOGI:
2248 list_for_each_entry(tgt, &vhost->targets, queue) {
2249 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2250 break;
2251 if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2252 continue;
2253 if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2254 continue;
2255 if (crq->node_name && tgt->ids.node_name != crq->node_name)
2256 continue;
2257 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2258 }
2259
2174 ibmvfc_reinit_host(vhost); 2260 ibmvfc_reinit_host(vhost);
2175 break; 2261 break;
2176 case IBMVFC_AE_LINK_DOWN: 2262 case IBMVFC_AE_LINK_DOWN:
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 return; 2308 return;
2223 case IBMVFC_CRQ_XPORT_EVENT: 2309 case IBMVFC_CRQ_XPORT_EVENT:
2224 vhost->state = IBMVFC_NO_CRQ; 2310 vhost->state = IBMVFC_NO_CRQ;
2311 vhost->logged_in = 0;
2225 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2312 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2226 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2313 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2227 /* We need to re-setup the interpartition connection */ 2314 /* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 done = 1; 2386 done = 1;
2300 } 2387 }
2301 2388
2302 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) 2389 if (vhost->scan_complete)
2303 done = 1; 2390 done = 1;
2304 spin_unlock_irqrestore(shost->host_lock, flags); 2391 spin_unlock_irqrestore(shost->host_lock, flags);
2305 return done; 2392 return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 vhost->login_buf->resp.partition_name); 2521 vhost->login_buf->resp.partition_name);
2435} 2522}
2436 2523
2437static struct device_attribute ibmvfc_host_partition_name = {
2438 .attr = {
2439 .name = "partition_name",
2440 .mode = S_IRUGO,
2441 },
2442 .show = ibmvfc_show_host_partition_name,
2443};
2444
2445static ssize_t ibmvfc_show_host_device_name(struct device *dev, 2524static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2446 struct device_attribute *attr, char *buf) 2525 struct device_attribute *attr, char *buf)
2447{ 2526{
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 vhost->login_buf->resp.device_name); 2531 vhost->login_buf->resp.device_name);
2453} 2532}
2454 2533
2455static struct device_attribute ibmvfc_host_device_name = {
2456 .attr = {
2457 .name = "device_name",
2458 .mode = S_IRUGO,
2459 },
2460 .show = ibmvfc_show_host_device_name,
2461};
2462
2463static ssize_t ibmvfc_show_host_loc_code(struct device *dev, 2534static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2464 struct device_attribute *attr, char *buf) 2535 struct device_attribute *attr, char *buf)
2465{ 2536{
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 vhost->login_buf->resp.port_loc_code); 2541 vhost->login_buf->resp.port_loc_code);
2471} 2542}
2472 2543
2473static struct device_attribute ibmvfc_host_loc_code = {
2474 .attr = {
2475 .name = "port_loc_code",
2476 .mode = S_IRUGO,
2477 },
2478 .show = ibmvfc_show_host_loc_code,
2479};
2480
2481static ssize_t ibmvfc_show_host_drc_name(struct device *dev, 2544static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2482 struct device_attribute *attr, char *buf) 2545 struct device_attribute *attr, char *buf)
2483{ 2546{
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 vhost->login_buf->resp.drc_name); 2551 vhost->login_buf->resp.drc_name);
2489} 2552}
2490 2553
2491static struct device_attribute ibmvfc_host_drc_name = {
2492 .attr = {
2493 .name = "drc_name",
2494 .mode = S_IRUGO,
2495 },
2496 .show = ibmvfc_show_host_drc_name,
2497};
2498
2499static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, 2554static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2500 struct device_attribute *attr, char *buf) 2555 struct device_attribute *attr, char *buf)
2501{ 2556{
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); 2559 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2505} 2560}
2506 2561
2507static struct device_attribute ibmvfc_host_npiv_version = { 2562static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2508 .attr = { 2563 struct device_attribute *attr, char *buf)
2509 .name = "npiv_version", 2564{
2510 .mode = S_IRUGO, 2565 struct Scsi_Host *shost = class_to_shost(dev);
2511 }, 2566 struct ibmvfc_host *vhost = shost_priv(shost);
2512 .show = ibmvfc_show_host_npiv_version, 2567 return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2513}; 2568}
2514 2569
2515/** 2570/**
2516 * ibmvfc_show_log_level - Show the adapter's error logging level 2571 * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 return strlen(buf); 2611 return strlen(buf);
2557} 2612}
2558 2613
2559static struct device_attribute ibmvfc_log_level_attr = { 2614static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2560 .attr = { 2615static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2561 .name = "log_level", 2616static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2562 .mode = S_IRUGO | S_IWUSR, 2617static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2563 }, 2618static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2564 .show = ibmvfc_show_log_level, 2619static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2565 .store = ibmvfc_store_log_level 2620static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2566}; 2621 ibmvfc_show_log_level, ibmvfc_store_log_level);
2567 2622
2568#ifdef CONFIG_SCSI_IBMVFC_TRACE 2623#ifdef CONFIG_SCSI_IBMVFC_TRACE
2569/** 2624/**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612#endif 2667#endif
2613 2668
2614static struct device_attribute *ibmvfc_attrs[] = { 2669static struct device_attribute *ibmvfc_attrs[] = {
2615 &ibmvfc_host_partition_name, 2670 &dev_attr_partition_name,
2616 &ibmvfc_host_device_name, 2671 &dev_attr_device_name,
2617 &ibmvfc_host_loc_code, 2672 &dev_attr_port_loc_code,
2618 &ibmvfc_host_drc_name, 2673 &dev_attr_drc_name,
2619 &ibmvfc_host_npiv_version, 2674 &dev_attr_npiv_version,
2620 &ibmvfc_log_level_attr, 2675 &dev_attr_capabilities,
2676 &dev_attr_log_level,
2621 NULL 2677 NULL
2622}; 2678};
2623 2679
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 * @tgt: ibmvfc target struct 2830 * @tgt: ibmvfc target struct
2775 * @job_step: initialization job step 2831 * @job_step: initialization job step
2776 * 2832 *
2833 * Returns: 1 if step will be retried / 0 if not
2834 *
2777 **/ 2835 **/
2778static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2836static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2779 void (*job_step) (struct ibmvfc_target *)) 2837 void (*job_step) (struct ibmvfc_target *))
2780{ 2838{
2781 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { 2839 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2782 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2840 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2783 wake_up(&tgt->vhost->work_wait_q); 2841 wake_up(&tgt->vhost->work_wait_q);
2842 return 0;
2784 } else 2843 } else
2785 ibmvfc_init_tgt(tgt, job_step); 2844 ibmvfc_init_tgt(tgt, job_step);
2845 return 1;
2786} 2846}
2787 2847
2788/* Defined in FC-LS */ 2848/* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2891 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2832 struct ibmvfc_prli_svc_parms *parms = &rsp->parms; 2892 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2833 u32 status = rsp->common.status; 2893 u32 status = rsp->common.status;
2834 int index; 2894 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
2835 2895
2836 vhost->discovery_threads--; 2896 vhost->discovery_threads--;
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2897 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; 2910 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2851 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) 2911 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2852 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 2912 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2853 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2913 tgt->add_rport = 1;
2854 } else 2914 } else
2855 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2915 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2856 } else if (prli_rsp[index].retry) 2916 } else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 break; 2927 break;
2868 case IBMVFC_MAD_FAILED: 2928 case IBMVFC_MAD_FAILED:
2869 default: 2929 default:
2870 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2871 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2872 rsp->status, rsp->error, status);
2873 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2930 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2874 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2931 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2875 else 2932 else
2876 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2933 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2934
2935 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2936 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2937 rsp->status, rsp->error, status);
2877 break; 2938 break;
2878 }; 2939 };
2879 2940
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 struct ibmvfc_host *vhost = evt->vhost; 2993 struct ibmvfc_host *vhost = evt->vhost;
2933 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; 2994 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2934 u32 status = rsp->common.status; 2995 u32 status = rsp->common.status;
2996 int level = IBMVFC_DEFAULT_LOG_LEVEL;
2935 2997
2936 vhost->discovery_threads--; 2998 vhost->discovery_threads--;
2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2999 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 break; 3022 break;
2961 case IBMVFC_MAD_FAILED: 3023 case IBMVFC_MAD_FAILED:
2962 default: 3024 default:
2963 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2964 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2965 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2966 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2967
2968 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3025 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2969 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 3026 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2970 else 3027 else
2971 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3028 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3029
3030 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3031 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3032 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3033 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2972 break; 3034 break;
2973 }; 3035 };
2974 3036
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 case IBMVFC_MAD_SUCCESS: 3191 case IBMVFC_MAD_SUCCESS:
3130 tgt_dbg(tgt, "ADISC succeeded\n"); 3192 tgt_dbg(tgt, "ADISC succeeded\n");
3131 if (ibmvfc_adisc_needs_plogi(mad, tgt)) 3193 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3132 tgt->need_login = 1; 3194 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3133 break; 3195 break;
3134 case IBMVFC_MAD_DRIVER_FAILED: 3196 case IBMVFC_MAD_DRIVER_FAILED:
3135 break; 3197 break;
3136 case IBMVFC_MAD_FAILED: 3198 case IBMVFC_MAD_FAILED:
3137 default: 3199 default:
3138 tgt->need_login = 1; 3200 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3139 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 3201 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3140 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 3202 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3141 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3203 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 struct ibmvfc_host *vhost = evt->vhost; 3384 struct ibmvfc_host *vhost = evt->vhost;
3323 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; 3385 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3324 u32 status = rsp->common.status; 3386 u32 status = rsp->common.status;
3387 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3325 3388
3326 vhost->discovery_threads--; 3389 vhost->discovery_threads--;
3327 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3390 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 break; 3404 break;
3342 case IBMVFC_MAD_FAILED: 3405 case IBMVFC_MAD_FAILED:
3343 default: 3406 default:
3344 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3345 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3346 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3347 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3348
3349 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && 3407 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3350 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && 3408 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3351 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) 3409 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3352 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3410 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3353 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3411 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3354 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3412 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3355 else 3413 else
3356 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3414 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3415
3416 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3417 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3418 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3419 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3357 break; 3420 break;
3358 }; 3421 };
3359 3422
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 } 3483 }
3421 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3484 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3422 3485
3423 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3486 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3424 if (!tgt) { 3487 if (!tgt) {
3425 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", 3488 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3426 scsi_id); 3489 scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 struct ibmvfc_host *vhost = evt->vhost; 3535 struct ibmvfc_host *vhost = evt->vhost;
3473 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; 3536 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3474 u32 mad_status = rsp->common.status; 3537 u32 mad_status = rsp->common.status;
3538 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3475 3539
3476 switch (mad_status) { 3540 switch (mad_status) {
3477 case IBMVFC_MAD_SUCCESS: 3541 case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); 3544 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3481 break; 3545 break;
3482 case IBMVFC_MAD_FAILED: 3546 case IBMVFC_MAD_FAILED:
3483 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", 3547 level += ibmvfc_retry_host_init(vhost);
3484 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); 3548 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3485 ibmvfc_retry_host_init(vhost); 3549 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3486 break; 3550 break;
3487 case IBMVFC_MAD_DRIVER_FAILED: 3551 case IBMVFC_MAD_DRIVER_FAILED:
3488 break; 3552 break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 u32 mad_status = evt->xfer_iu->npiv_login.common.status; 3598 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3535 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; 3599 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3536 unsigned int npiv_max_sectors; 3600 unsigned int npiv_max_sectors;
3601 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3537 3602
3538 switch (mad_status) { 3603 switch (mad_status) {
3539 case IBMVFC_MAD_SUCCESS: 3604 case IBMVFC_MAD_SUCCESS:
3540 ibmvfc_free_event(evt); 3605 ibmvfc_free_event(evt);
3541 break; 3606 break;
3542 case IBMVFC_MAD_FAILED: 3607 case IBMVFC_MAD_FAILED:
3543 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3544 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3545 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3608 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3546 ibmvfc_retry_host_init(vhost); 3609 level += ibmvfc_retry_host_init(vhost);
3547 else 3610 else
3548 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 3611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3612 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3613 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3549 ibmvfc_free_event(evt); 3614 ibmvfc_free_event(evt);
3550 return; 3615 return;
3551 case IBMVFC_MAD_CRQ_ERROR: 3616 case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 return; 3643 return;
3579 } 3644 }
3580 3645
3646 vhost->logged_in = 1;
3581 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); 3647 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3582 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 3648 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3583 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 3649 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636}; 3702};
3637 3703
3638/** 3704/**
3705 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
3706 * @vhost: ibmvfc host struct
3707 *
3708 **/
3709static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3710{
3711 struct ibmvfc_host *vhost = evt->vhost;
3712 u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
3713
3714 ibmvfc_free_event(evt);
3715
3716 switch (mad_status) {
3717 case IBMVFC_MAD_SUCCESS:
3718 if (list_empty(&vhost->sent) &&
3719 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3720 ibmvfc_init_host(vhost, 0);
3721 return;
3722 }
3723 break;
3724 case IBMVFC_MAD_FAILED:
3725 case IBMVFC_MAD_NOT_SUPPORTED:
3726 case IBMVFC_MAD_CRQ_ERROR:
3727 case IBMVFC_MAD_DRIVER_FAILED:
3728 default:
3729 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
3730 break;
3731 }
3732
3733 ibmvfc_hard_reset_host(vhost);
3734}
3735
3736/**
3737 * ibmvfc_npiv_logout - Issue an NPIV Logout
3738 * @vhost: ibmvfc host struct
3739 *
3740 **/
3741static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
3742{
3743 struct ibmvfc_npiv_logout_mad *mad;
3744 struct ibmvfc_event *evt;
3745
3746 evt = ibmvfc_get_event(vhost);
3747 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
3748
3749 mad = &evt->iu.npiv_logout;
3750 memset(mad, 0, sizeof(*mad));
3751 mad->common.version = 1;
3752 mad->common.opcode = IBMVFC_NPIV_LOGOUT;
3753 mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
3754
3755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
3756
3757 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3758 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
3759 else
3760 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3761}
3762
3763/**
3639 * ibmvfc_dev_init_to_do - Is there target initialization work to do? 3764 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3640 * @vhost: ibmvfc host struct 3765 * @vhost: ibmvfc host struct
3641 * 3766 *
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 switch (vhost->action) { 3796 switch (vhost->action) {
3672 case IBMVFC_HOST_ACTION_NONE: 3797 case IBMVFC_HOST_ACTION_NONE:
3673 case IBMVFC_HOST_ACTION_INIT_WAIT: 3798 case IBMVFC_HOST_ACTION_INIT_WAIT:
3799 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3674 return 0; 3800 return 0;
3675 case IBMVFC_HOST_ACTION_TGT_INIT: 3801 case IBMVFC_HOST_ACTION_TGT_INIT:
3676 case IBMVFC_HOST_ACTION_QUERY_TGTS: 3802 case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) 3809 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3684 return 0; 3810 return 0;
3685 return 1; 3811 return 1;
3812 case IBMVFC_HOST_ACTION_LOGO:
3686 case IBMVFC_HOST_ACTION_INIT: 3813 case IBMVFC_HOST_ACTION_INIT:
3687 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3814 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3688 case IBMVFC_HOST_ACTION_TGT_ADD:
3689 case IBMVFC_HOST_ACTION_TGT_DEL: 3815 case IBMVFC_HOST_ACTION_TGT_DEL:
3690 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 3816 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3691 case IBMVFC_HOST_ACTION_QUERY: 3817 case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) 3866static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3741{ 3867{
3742 struct ibmvfc_host *vhost = tgt->vhost; 3868 struct ibmvfc_host *vhost = tgt->vhost;
3743 struct fc_rport *rport = tgt->rport; 3869 struct fc_rport *rport;
3744 unsigned long flags; 3870 unsigned long flags;
3745 3871
3746 if (rport) { 3872 tgt_dbg(tgt, "Adding rport\n");
3747 tgt_dbg(tgt, "Setting rport roles\n"); 3873 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3748 fc_remote_port_rolechg(rport, tgt->ids.roles); 3874 spin_lock_irqsave(vhost->host->host_lock, flags);
3749 spin_lock_irqsave(vhost->host->host_lock, flags); 3875
3750 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3876 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3877 tgt_dbg(tgt, "Deleting rport\n");
3878 list_del(&tgt->queue);
3751 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3879 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3880 fc_remote_port_delete(rport);
3881 del_timer_sync(&tgt->timer);
3882 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 return; 3883 return;
3753 } 3884 }
3754 3885
3755 tgt_dbg(tgt, "Adding rport\n");
3756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3757 spin_lock_irqsave(vhost->host->host_lock, flags);
3758 tgt->rport = rport;
3759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3760 if (rport) { 3886 if (rport) {
3761 tgt_dbg(tgt, "rport add succeeded\n"); 3887 tgt_dbg(tgt, "rport add succeeded\n");
3888 tgt->rport = rport;
3762 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3889 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3763 rport->supported_classes = 0; 3890 rport->supported_classes = 0;
3764 tgt->target_id = rport->scsi_target_id; 3891 tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 vhost->events_to_log = 0; 3916 vhost->events_to_log = 0;
3790 switch (vhost->action) { 3917 switch (vhost->action) {
3791 case IBMVFC_HOST_ACTION_NONE: 3918 case IBMVFC_HOST_ACTION_NONE:
3919 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3792 case IBMVFC_HOST_ACTION_INIT_WAIT: 3920 case IBMVFC_HOST_ACTION_INIT_WAIT:
3793 break; 3921 break;
3922 case IBMVFC_HOST_ACTION_LOGO:
3923 vhost->job_step(vhost);
3924 break;
3794 case IBMVFC_HOST_ACTION_INIT: 3925 case IBMVFC_HOST_ACTION_INIT:
3795 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3926 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3796 if (vhost->delay_init) { 3927 if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 3967
3837 if (vhost->state == IBMVFC_INITIALIZING) { 3968 if (vhost->state == IBMVFC_INITIALIZING) {
3838 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { 3969 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3839 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3970 if (vhost->reinit) {
3840 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); 3971 vhost->reinit = 0;
3841 vhost->init_retries = 0; 3972 scsi_block_requests(vhost->host);
3842 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3973 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3843 scsi_unblock_requests(vhost->host); 3974 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3975 } else {
3976 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3977 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3978 wake_up(&vhost->init_wait_q);
3979 schedule_work(&vhost->rport_add_work_q);
3980 vhost->init_retries = 0;
3981 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3982 scsi_unblock_requests(vhost->host);
3983 }
3984
3844 return; 3985 return;
3845 } else { 3986 } else {
3846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3987 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 if (!ibmvfc_dev_init_to_do(vhost)) 4012 if (!ibmvfc_dev_init_to_do(vhost))
3872 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); 4013 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3873 break; 4014 break;
3874 case IBMVFC_HOST_ACTION_TGT_ADD:
3875 list_for_each_entry(tgt, &vhost->targets, queue) {
3876 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3877 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3878 ibmvfc_tgt_add_rport(tgt);
3879 return;
3880 }
3881 }
3882
3883 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3884 vhost->reinit = 0;
3885 scsi_block_requests(vhost->host);
3886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3887 } else {
3888 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3889 wake_up(&vhost->init_wait_q);
3890 }
3891 break;
3892 default: 4015 default:
3893 break; 4016 break;
3894 }; 4017 };
@@ -4118,6 +4241,56 @@ nomem:
4118} 4241}
4119 4242
4120/** 4243/**
4244 * ibmvfc_rport_add_thread - Worker thread for rport adds
4245 * @work: work struct
4246 *
4247 **/
4248static void ibmvfc_rport_add_thread(struct work_struct *work)
4249{
4250 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4251 rport_add_work_q);
4252 struct ibmvfc_target *tgt;
4253 struct fc_rport *rport;
4254 unsigned long flags;
4255 int did_work;
4256
4257 ENTER;
4258 spin_lock_irqsave(vhost->host->host_lock, flags);
4259 do {
4260 did_work = 0;
4261 if (vhost->state != IBMVFC_ACTIVE)
4262 break;
4263
4264 list_for_each_entry(tgt, &vhost->targets, queue) {
4265 if (tgt->add_rport) {
4266 did_work = 1;
4267 tgt->add_rport = 0;
4268 kref_get(&tgt->kref);
4269 rport = tgt->rport;
4270 if (!rport) {
4271 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4272 ibmvfc_tgt_add_rport(tgt);
4273 } else if (get_device(&rport->dev)) {
4274 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4275 tgt_dbg(tgt, "Setting rport roles\n");
4276 fc_remote_port_rolechg(rport, tgt->ids.roles);
4277 put_device(&rport->dev);
4278 }
4279
4280 kref_put(&tgt->kref, ibmvfc_release_tgt);
4281 spin_lock_irqsave(vhost->host->host_lock, flags);
4282 break;
4283 }
4284 }
4285 } while(did_work);
4286
4287 if (vhost->state == IBMVFC_ACTIVE)
4288 vhost->scan_complete = 1;
4289 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4290 LEAVE;
4291}
4292
4293/**
4121 * ibmvfc_probe - Adapter hot plug add entry point 4294 * ibmvfc_probe - Adapter hot plug add entry point
4122 * @vdev: vio device struct 4295 * @vdev: vio device struct
4123 * @id: vio device id struct 4296 * @id: vio device id struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 strcpy(vhost->partition_name, "UNKNOWN"); 4333 strcpy(vhost->partition_name, "UNKNOWN");
4161 init_waitqueue_head(&vhost->work_wait_q); 4334 init_waitqueue_head(&vhost->work_wait_q);
4162 init_waitqueue_head(&vhost->init_wait_q); 4335 init_waitqueue_head(&vhost->init_wait_q);
4336 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4163 4337
4164 if ((rc = ibmvfc_alloc_mem(vhost))) 4338 if ((rc = ibmvfc_alloc_mem(vhost)))
4165 goto free_scsi_host; 4339 goto free_scsi_host;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a7568..c2668d7d67f5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.5" 32#define IBMVFC_DRIVER_VERSION "1.0.6"
33#define IBMVFC_DRIVER_DATE "(March 19, 2009)" 33#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
57 * Ensure we have resources for ERP and initialization: 57 * Ensure we have resources for ERP and initialization:
58 * 1 for ERP 58 * 1 for ERP
59 * 1 for initialization 59 * 1 for initialization
60 * 1 for NPIV Logout
60 * 2 for each discovery thread 61 * 2 for each discovery thread
61 */ 62 */
62#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) 63#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
63 64
64#define IBMVFC_MAD_SUCCESS 0x00 65#define IBMVFC_MAD_SUCCESS 0x00
65#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 66#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 128 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
128 IBMVFC_PASSTHRU = 0x0200, 129 IBMVFC_PASSTHRU = 0x0200,
129 IBMVFC_TMF_MAD = 0x0100, 130 IBMVFC_TMF_MAD = 0x0100,
131 IBMVFC_NPIV_LOGOUT = 0x0800,
130}; 132};
131 133
132struct ibmvfc_mad_common { 134struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 struct srp_direct_buf buffer; 145 struct srp_direct_buf buffer;
144}__attribute__((packed, aligned (8))); 146}__attribute__((packed, aligned (8)));
145 147
148struct ibmvfc_npiv_logout_mad {
149 struct ibmvfc_mad_common common;
150}__attribute__((packed, aligned (8)));
151
146#define IBMVFC_MAX_NAME 256 152#define IBMVFC_MAX_NAME 256
147 153
148struct ibmvfc_npiv_login { 154struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201#define IBMVFC_NATIVE_FC 0x01 207#define IBMVFC_NATIVE_FC 0x01
202#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 208#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
203 u32 reserved; 209 u32 reserved;
204 u64 capabilites; 210 u64 capabilities;
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
205 u32 max_cmds; 212 u32 max_cmds;
206 u32 scsi_id_sz; 213 u32 scsi_id_sz;
207 u64 max_dma_len; 214 u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 dma_addr_t msg_token; 548 dma_addr_t msg_token;
542}; 549};
543 550
551enum ibmvfc_ae_link_state {
552 IBMVFC_AE_LS_LINK_UP = 0x01,
553 IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
554 IBMVFC_AE_LS_LINK_DOWN = 0x04,
555 IBMVFC_AE_LS_LINK_DEAD = 0x08,
556};
557
544struct ibmvfc_async_crq { 558struct ibmvfc_async_crq {
545 volatile u8 valid; 559 volatile u8 valid;
546 u8 pad[3]; 560 u8 link_state;
561 u8 pad[2];
547 u32 pad2; 562 u32 pad2;
548 volatile u64 event; 563 volatile u64 event;
549 volatile u64 scsi_id; 564 volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
561union ibmvfc_iu { 576union ibmvfc_iu {
562 struct ibmvfc_mad_common mad_common; 577 struct ibmvfc_mad_common mad_common;
563 struct ibmvfc_npiv_login_mad npiv_login; 578 struct ibmvfc_npiv_login_mad npiv_login;
579 struct ibmvfc_npiv_logout_mad npiv_logout;
564 struct ibmvfc_discover_targets discover_targets; 580 struct ibmvfc_discover_targets discover_targets;
565 struct ibmvfc_port_login plogi; 581 struct ibmvfc_port_login plogi;
566 struct ibmvfc_process_login prli; 582 struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
575 IBMVFC_TGT_ACTION_NONE = 0, 591 IBMVFC_TGT_ACTION_NONE = 0,
576 IBMVFC_TGT_ACTION_INIT, 592 IBMVFC_TGT_ACTION_INIT,
577 IBMVFC_TGT_ACTION_INIT_WAIT, 593 IBMVFC_TGT_ACTION_INIT_WAIT,
578 IBMVFC_TGT_ACTION_ADD_RPORT,
579 IBMVFC_TGT_ACTION_DEL_RPORT, 594 IBMVFC_TGT_ACTION_DEL_RPORT,
580}; 595};
581 596
@@ -588,6 +603,7 @@ struct ibmvfc_target {
588 int target_id; 603 int target_id;
589 enum ibmvfc_target_action action; 604 enum ibmvfc_target_action action;
590 int need_login; 605 int need_login;
606 int add_rport;
591 int init_retries; 607 int init_retries;
592 u32 cancel_key; 608 u32 cancel_key;
593 struct ibmvfc_service_parms service_parms; 609 struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
627 643
628enum ibmvfc_host_action { 644enum ibmvfc_host_action {
629 IBMVFC_HOST_ACTION_NONE = 0, 645 IBMVFC_HOST_ACTION_NONE = 0,
646 IBMVFC_HOST_ACTION_LOGO,
647 IBMVFC_HOST_ACTION_LOGO_WAIT,
630 IBMVFC_HOST_ACTION_INIT, 648 IBMVFC_HOST_ACTION_INIT,
631 IBMVFC_HOST_ACTION_INIT_WAIT, 649 IBMVFC_HOST_ACTION_INIT_WAIT,
632 IBMVFC_HOST_ACTION_QUERY, 650 IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
635 IBMVFC_HOST_ACTION_ALLOC_TGTS, 653 IBMVFC_HOST_ACTION_ALLOC_TGTS,
636 IBMVFC_HOST_ACTION_TGT_INIT, 654 IBMVFC_HOST_ACTION_TGT_INIT,
637 IBMVFC_HOST_ACTION_TGT_DEL_FAILED, 655 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
638 IBMVFC_HOST_ACTION_TGT_ADD,
639}; 656};
640 657
641enum ibmvfc_host_state { 658enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
682 int client_migrated; 699 int client_migrated;
683 int reinit; 700 int reinit;
684 int delay_init; 701 int delay_init;
702 int scan_complete;
703 int logged_in;
685 int events_to_log; 704 int events_to_log;
686#define IBMVFC_AE_LINKUP 0x0001 705#define IBMVFC_AE_LINKUP 0x0001
687#define IBMVFC_AE_LINKDOWN 0x0002 706#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
692 void (*job_step) (struct ibmvfc_host *); 711 void (*job_step) (struct ibmvfc_host *);
693 struct task_struct *work_thread; 712 struct task_struct *work_thread;
694 struct tasklet_struct tasklet; 713 struct tasklet_struct tasklet;
714 struct work_struct rport_add_work_q;
695 wait_queue_head_t init_wait_q; 715 wait_queue_head_t init_wait_q;
696 wait_queue_head_t work_wait_q; 716 wait_queue_head_t work_wait_q;
697}; 717};
@@ -707,6 +727,12 @@ struct ibmvfc_host {
707#define tgt_err(t, fmt, ...) \ 727#define tgt_err(t, fmt, ...) \
708 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 728 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
709 729
730#define tgt_log(t, level, fmt, ...) \
731 do { \
732 if ((t)->vhost->log_level >= level) \
733 tgt_err(t, fmt, ##__VA_ARGS__); \
734 } while (0)
735
710#define ibmvfc_dbg(vhost, ...) \ 736#define ibmvfc_dbg(vhost, ...) \
711 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 737 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
712 738
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e408..11d2602ae88e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
70#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h>
73#include <asm/firmware.h> 74#include <asm/firmware.h>
74#include <asm/vio.h> 75#include <asm/vio.h>
75#include <asm/firmware.h> 76#include <asm/firmware.h>
@@ -87,9 +88,15 @@
87 */ 88 */
88static int max_id = 64; 89static int max_id = 64;
89static int max_channel = 3; 90static int max_channel = 3;
90static int init_timeout = 5; 91static int init_timeout = 300;
92static int login_timeout = 60;
93static int info_timeout = 30;
94static int abort_timeout = 60;
95static int reset_timeout = 60;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 96static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 97static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
98static int fast_fail = 1;
99static int client_reserve = 1;
93 100
94static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
95 102
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 117MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
111module_param_named(max_requests, max_requests, int, S_IRUGO); 118module_param_named(max_requests, max_requests, int, S_IRUGO);
112MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 119MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
120module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
122module_param_named(client_reserve, client_reserve, int, S_IRUGO );
123MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
113 124
114/* ------------------------------------------------------------ 125/* ------------------------------------------------------------
115 * Routines for the event pool and event structs 126 * Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
781/* ------------------------------------------------------------ 792/* ------------------------------------------------------------
782 * Routines for driver initialization 793 * Routines for driver initialization
783 */ 794 */
795
784/** 796/**
785 * adapter_info_rsp: - Handle response to MAD adapter info request 797 * map_persist_bufs: - Pre-map persistent data for adapter logins
786 * @evt_struct: srp_event_struct with the response 798 * @hostdata: ibmvscsi_host_data of host
787 * 799 *
788 * Used as a "done" callback by when sending adapter_info. Gets called 800 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
789 * by ibmvscsi_handle_crq() 801 * Return 1 on error, 0 on success.
790*/ 802 */
791static void adapter_info_rsp(struct srp_event_struct *evt_struct) 803static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
792{ 804{
793 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
794 dma_unmap_single(hostdata->dev,
795 evt_struct->iu.mad.adapter_info.buffer,
796 evt_struct->iu.mad.adapter_info.common.length,
797 DMA_BIDIRECTIONAL);
798 805
799 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 806 hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
800 dev_err(hostdata->dev, "error %d getting adapter info\n", 807 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
801 evt_struct->xfer_iu->mad.adapter_info.common.status); 808
802 } else { 809 if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
803 dev_info(hostdata->dev, "host srp version: %s, " 810 dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
804 "host partition %s (%d), OS %d, max io %u\n", 811 return 1;
805 hostdata->madapter_info.srp_version,
806 hostdata->madapter_info.partition_name,
807 hostdata->madapter_info.partition_number,
808 hostdata->madapter_info.os_type,
809 hostdata->madapter_info.port_max_txu[0]);
810
811 if (hostdata->madapter_info.port_max_txu[0])
812 hostdata->host->max_sectors =
813 hostdata->madapter_info.port_max_txu[0] >> 9;
814
815 if (hostdata->madapter_info.os_type == 3 &&
816 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
817 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
818 hostdata->madapter_info.srp_version);
819 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
820 MAX_INDIRECT_BUFS);
821 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
822 }
823 } 812 }
813
814 hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
815 &hostdata->madapter_info,
816 sizeof(hostdata->madapter_info),
817 DMA_BIDIRECTIONAL);
818 if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
819 dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
820 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
821 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
822 return 1;
823 }
824
825 return 0;
824} 826}
825 827
826/** 828/**
827 * send_mad_adapter_info: - Sends the mad adapter info request 829 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
828 * and stores the result so it can be retrieved with 830 * @hostdata: ibmvscsi_host_data of host
829 * sysfs. We COULD consider causing a failure if the 831 *
830 * returned SRP version doesn't match ours. 832 * Unmap the capabilities and adapter info DMA buffers
831 * @hostdata: ibmvscsi_host_data of host 833 */
832 * 834static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
833 * Returns zero if successful.
834*/
835static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
836{ 835{
837 struct viosrp_adapter_info *req; 836 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
838 struct srp_event_struct *evt_struct; 837 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
839 unsigned long flags;
840 dma_addr_t addr;
841
842 evt_struct = get_event_struct(&hostdata->pool);
843 if (!evt_struct) {
844 dev_err(hostdata->dev,
845 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
846 return;
847 }
848
849 init_event_struct(evt_struct,
850 adapter_info_rsp,
851 VIOSRP_MAD_FORMAT,
852 init_timeout);
853
854 req = &evt_struct->iu.mad.adapter_info;
855 memset(req, 0x00, sizeof(*req));
856
857 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
858 req->common.length = sizeof(hostdata->madapter_info);
859 req->buffer = addr = dma_map_single(hostdata->dev,
860 &hostdata->madapter_info,
861 sizeof(hostdata->madapter_info),
862 DMA_BIDIRECTIONAL);
863 838
864 if (dma_mapping_error(hostdata->dev, req->buffer)) { 839 dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
865 if (!firmware_has_feature(FW_FEATURE_CMO)) 840 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
866 dev_err(hostdata->dev, 841}
867 "Unable to map request_buffer for "
868 "adapter_info!\n");
869 free_event_struct(&hostdata->pool, evt_struct);
870 return;
871 }
872
873 spin_lock_irqsave(hostdata->host->host_lock, flags);
874 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
875 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
876 dma_unmap_single(hostdata->dev,
877 addr,
878 sizeof(hostdata->madapter_info),
879 DMA_BIDIRECTIONAL);
880 }
881 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
882};
883 842
884/** 843/**
885 * login_rsp: - Handle response to SRP login request 844 * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
909 } 868 }
910 869
911 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); 870 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
912 871 hostdata->client_migrated = 0;
913 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
914 dev_err(hostdata->dev, "Invalid request_limit.\n");
915 872
916 /* Now we know what the real request-limit is. 873 /* Now we know what the real request-limit is.
917 * This value is set rather than added to request_limit because 874 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
922 879
923 /* If we had any pending I/Os, kick them */ 880 /* If we had any pending I/Os, kick them */
924 scsi_unblock_requests(hostdata->host); 881 scsi_unblock_requests(hostdata->host);
925
926 send_mad_adapter_info(hostdata);
927 return;
928} 882}
929 883
930/** 884/**
931 * send_srp_login: - Sends the srp login 885 * send_srp_login: - Sends the srp login
932 * @hostdata: ibmvscsi_host_data of host 886 * @hostdata: ibmvscsi_host_data of host
933 * 887 *
934 * Returns zero if successful. 888 * Returns zero if successful.
935*/ 889*/
936static int send_srp_login(struct ibmvscsi_host_data *hostdata) 890static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
939 unsigned long flags; 893 unsigned long flags;
940 struct srp_login_req *login; 894 struct srp_login_req *login;
941 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 895 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
942 if (!evt_struct) {
943 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
944 return FAILED;
945 }
946 896
947 init_event_struct(evt_struct, 897 BUG_ON(!evt_struct);
948 login_rsp, 898 init_event_struct(evt_struct, login_rsp,
949 VIOSRP_SRP_FORMAT, 899 VIOSRP_SRP_FORMAT, login_timeout);
950 init_timeout);
951 900
952 login = &evt_struct->iu.srp.login_req; 901 login = &evt_struct->iu.srp.login_req;
953 memset(login, 0x00, sizeof(struct srp_login_req)); 902 memset(login, 0, sizeof(*login));
954 login->opcode = SRP_LOGIN_REQ; 903 login->opcode = SRP_LOGIN_REQ;
955 login->req_it_iu_len = sizeof(union srp_iu); 904 login->req_it_iu_len = sizeof(union srp_iu);
956 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 905 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
957 906
958 spin_lock_irqsave(hostdata->host->host_lock, flags); 907 spin_lock_irqsave(hostdata->host->host_lock, flags);
959 /* Start out with a request limit of 0, since this is negotiated in 908 /* Start out with a request limit of 0, since this is negotiated in
960 * the login request we are just sending and login requests always 909 * the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
962 */ 911 */
963 atomic_set(&hostdata->request_limit, 0); 912 atomic_set(&hostdata->request_limit, 0);
964 913
965 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 914 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 915 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 dev_info(hostdata->dev, "sent SRP login\n"); 916 dev_info(hostdata->dev, "sent SRP login\n");
968 return rc; 917 return rc;
969}; 918};
970 919
971/** 920/**
921 * capabilities_rsp: - Handle response to MAD adapter capabilities request
922 * @evt_struct: srp_event_struct with the response
923 *
924 * Used as a "done" callback by when sending adapter_info.
925 */
926static void capabilities_rsp(struct srp_event_struct *evt_struct)
927{
928 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
929
930 if (evt_struct->xfer_iu->mad.capabilities.common.status) {
931 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
932 evt_struct->xfer_iu->mad.capabilities.common.status);
933 } else {
934 if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
935 dev_info(hostdata->dev, "Partition migration not supported\n");
936
937 if (client_reserve) {
938 if (hostdata->caps.reserve.common.server_support ==
939 SERVER_SUPPORTS_CAP)
940 dev_info(hostdata->dev, "Client reserve enabled\n");
941 else
942 dev_info(hostdata->dev, "Client reserve not supported\n");
943 }
944 }
945
946 send_srp_login(hostdata);
947}
948
949/**
950 * send_mad_capabilities: - Sends the mad capabilities request
951 * and stores the result so it can be retrieved with
952 * @hostdata: ibmvscsi_host_data of host
953 */
954static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
955{
956 struct viosrp_capabilities *req;
957 struct srp_event_struct *evt_struct;
958 unsigned long flags;
959 struct device_node *of_node = hostdata->dev->archdata.of_node;
960 const char *location;
961
962 evt_struct = get_event_struct(&hostdata->pool);
963 BUG_ON(!evt_struct);
964
965 init_event_struct(evt_struct, capabilities_rsp,
966 VIOSRP_MAD_FORMAT, info_timeout);
967
968 req = &evt_struct->iu.mad.capabilities;
969 memset(req, 0, sizeof(*req));
970
971 hostdata->caps.flags = CAP_LIST_SUPPORTED;
972 if (hostdata->client_migrated)
973 hostdata->caps.flags |= CLIENT_MIGRATED;
974
975 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
976 sizeof(hostdata->caps.name));
977 hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
978
979 location = of_get_property(of_node, "ibm,loc-code", NULL);
980 location = location ? location : dev_name(hostdata->dev);
981 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
982 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
983
984 req->common.type = VIOSRP_CAPABILITIES_TYPE;
985 req->buffer = hostdata->caps_addr;
986
987 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
988 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
989 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
990 hostdata->caps.migration.ecl = 1;
991
992 if (client_reserve) {
993 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
994 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
995 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
996 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
997 req->common.length = sizeof(hostdata->caps);
998 } else
999 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
1000
1001 spin_lock_irqsave(hostdata->host->host_lock, flags);
1002 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1003 dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1004 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1005};
1006
1007/**
1008 * fast_fail_rsp: - Handle response to MAD enable fast fail
1009 * @evt_struct: srp_event_struct with the response
1010 *
1011 * Used as a "done" callback by when sending enable fast fail. Gets called
1012 * by ibmvscsi_handle_crq()
1013 */
1014static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1015{
1016 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1017 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
1018
1019 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1020 dev_err(hostdata->dev, "fast_fail not supported in server\n");
1021 else if (status == VIOSRP_MAD_FAILED)
1022 dev_err(hostdata->dev, "fast_fail request failed\n");
1023 else if (status != VIOSRP_MAD_SUCCESS)
1024 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1025
1026 send_mad_capabilities(hostdata);
1027}
1028
1029/**
1030 * init_host - Start host initialization
1031 * @hostdata: ibmvscsi_host_data of host
1032 *
1033 * Returns zero if successful.
1034 */
1035static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1036{
1037 int rc;
1038 unsigned long flags;
1039 struct viosrp_fast_fail *fast_fail_mad;
1040 struct srp_event_struct *evt_struct;
1041
1042 if (!fast_fail) {
1043 send_mad_capabilities(hostdata);
1044 return 0;
1045 }
1046
1047 evt_struct = get_event_struct(&hostdata->pool);
1048 BUG_ON(!evt_struct);
1049
1050 init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1051
1052 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1053 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1054 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
1055 fast_fail_mad->common.length = sizeof(*fast_fail_mad);
1056
1057 spin_lock_irqsave(hostdata->host->host_lock, flags);
1058 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1059 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1060 return rc;
1061}
1062
1063/**
1064 * adapter_info_rsp: - Handle response to MAD adapter info request
1065 * @evt_struct: srp_event_struct with the response
1066 *
1067 * Used as a "done" callback by when sending adapter_info. Gets called
1068 * by ibmvscsi_handle_crq()
1069*/
1070static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1071{
1072 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1073
1074 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1075 dev_err(hostdata->dev, "error %d getting adapter info\n",
1076 evt_struct->xfer_iu->mad.adapter_info.common.status);
1077 } else {
1078 dev_info(hostdata->dev, "host srp version: %s, "
1079 "host partition %s (%d), OS %d, max io %u\n",
1080 hostdata->madapter_info.srp_version,
1081 hostdata->madapter_info.partition_name,
1082 hostdata->madapter_info.partition_number,
1083 hostdata->madapter_info.os_type,
1084 hostdata->madapter_info.port_max_txu[0]);
1085
1086 if (hostdata->madapter_info.port_max_txu[0])
1087 hostdata->host->max_sectors =
1088 hostdata->madapter_info.port_max_txu[0] >> 9;
1089
1090 if (hostdata->madapter_info.os_type == 3 &&
1091 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1092 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1093 hostdata->madapter_info.srp_version);
1094 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 }
1098 }
1099
1100 enable_fast_fail(hostdata);
1101}
1102
1103/**
1104 * send_mad_adapter_info: - Sends the mad adapter info request
1105 * and stores the result so it can be retrieved with
1106 * sysfs. We COULD consider causing a failure if the
1107 * returned SRP version doesn't match ours.
1108 * @hostdata: ibmvscsi_host_data of host
1109 *
1110 * Returns zero if successful.
1111*/
1112static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1113{
1114 struct viosrp_adapter_info *req;
1115 struct srp_event_struct *evt_struct;
1116 unsigned long flags;
1117
1118 evt_struct = get_event_struct(&hostdata->pool);
1119 BUG_ON(!evt_struct);
1120
1121 init_event_struct(evt_struct,
1122 adapter_info_rsp,
1123 VIOSRP_MAD_FORMAT,
1124 info_timeout);
1125
1126 req = &evt_struct->iu.mad.adapter_info;
1127 memset(req, 0x00, sizeof(*req));
1128
1129 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
1130 req->common.length = sizeof(hostdata->madapter_info);
1131 req->buffer = hostdata->adapter_info_addr;
1132
1133 spin_lock_irqsave(hostdata->host->host_lock, flags);
1134 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1135 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1136 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1137};
1138
1139/**
1140 * init_adapter: Start virtual adapter initialization sequence
1141 *
1142 */
1143static void init_adapter(struct ibmvscsi_host_data *hostdata)
1144{
1145 send_mad_adapter_info(hostdata);
1146}
1147
1148/**
972 * sync_completion: Signal that a synchronous command has completed 1149 * sync_completion: Signal that a synchronous command has completed
973 * Note that after returning from this call, the evt_struct is freed. 1150 * Note that after returning from this call, the evt_struct is freed.
974 * the caller waiting on this completion shouldn't touch the evt_struct 1151 * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1029 init_event_struct(evt, 1206 init_event_struct(evt,
1030 sync_completion, 1207 sync_completion,
1031 VIOSRP_SRP_FORMAT, 1208 VIOSRP_SRP_FORMAT,
1032 init_timeout); 1209 abort_timeout);
1033 1210
1034 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1211 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1035 1212
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1043 evt->sync_srp = &srp_rsp; 1220 evt->sync_srp = &srp_rsp;
1044 1221
1045 init_completion(&evt->comp); 1222 init_completion(&evt->comp);
1046 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1223 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1047 1224
1048 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1225 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1049 break; 1226 break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1152 init_event_struct(evt, 1329 init_event_struct(evt,
1153 sync_completion, 1330 sync_completion,
1154 VIOSRP_SRP_FORMAT, 1331 VIOSRP_SRP_FORMAT,
1155 init_timeout); 1332 reset_timeout);
1156 1333
1157 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1334 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1158 1335
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1165 evt->sync_srp = &srp_rsp; 1342 evt->sync_srp = &srp_rsp;
1166 1343
1167 init_completion(&evt->comp); 1344 init_completion(&evt->comp);
1168 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1345 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1169 1346
1170 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1347 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1171 break; 1348 break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1281 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1458 if ((rc = ibmvscsi_ops->send_crq(hostdata,
1282 0xC002000000000000LL, 0)) == 0) { 1459 0xC002000000000000LL, 0)) == 0) {
1283 /* Now login */ 1460 /* Now login */
1284 send_srp_login(hostdata); 1461 init_adapter(hostdata);
1285 } else { 1462 } else {
1286 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); 1463 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1287 } 1464 }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1291 dev_info(hostdata->dev, "partner initialization complete\n"); 1468 dev_info(hostdata->dev, "partner initialization complete\n");
1292 1469
1293 /* Now login */ 1470 /* Now login */
1294 send_srp_login(hostdata); 1471 init_adapter(hostdata);
1295 break; 1472 break;
1296 default: 1473 default:
1297 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); 1474 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1303 if (crq->format == 0x06) { 1480 if (crq->format == 0x06) {
1304 /* We need to re-setup the interpartition connection */ 1481 /* We need to re-setup the interpartition connection */
1305 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1482 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1483 hostdata->client_migrated = 1;
1306 purge_requests(hostdata, DID_REQUEUE); 1484 purge_requests(hostdata, DID_REQUEUE);
1307 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1485 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
1308 hostdata)) || 1486 hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1397 init_event_struct(evt_struct, 1575 init_event_struct(evt_struct,
1398 sync_completion, 1576 sync_completion,
1399 VIOSRP_MAD_FORMAT, 1577 VIOSRP_MAD_FORMAT,
1400 init_timeout); 1578 info_timeout);
1401 1579
1402 host_config = &evt_struct->iu.mad.host_config; 1580 host_config = &evt_struct->iu.mad.host_config;
1403 1581
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1419 1597
1420 init_completion(&evt_struct->comp); 1598 init_completion(&evt_struct->comp);
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1599 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 1600 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1423 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1601 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1424 if (rc == 0) 1602 if (rc == 0)
1425 wait_for_completion(&evt_struct->comp); 1603 wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1444 spin_lock_irqsave(shost->host_lock, lock_flags); 1622 spin_lock_irqsave(shost->host_lock, lock_flags);
1445 if (sdev->type == TYPE_DISK) { 1623 if (sdev->type == TYPE_DISK) {
1446 sdev->allow_restart = 1; 1624 sdev->allow_restart = 1;
1447 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 1625 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1448 } 1626 }
1449 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1627 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1450 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1628 spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1471/* ------------------------------------------------------------ 1649/* ------------------------------------------------------------
1472 * sysfs attributes 1650 * sysfs attributes
1473 */ 1651 */
1652static ssize_t show_host_vhost_loc(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct Scsi_Host *shost = class_to_shost(dev);
1656 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1657 int len;
1658
1659 len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1660 hostdata->caps.loc);
1661 return len;
1662}
1663
1664static struct device_attribute ibmvscsi_host_vhost_loc = {
1665 .attr = {
1666 .name = "vhost_loc",
1667 .mode = S_IRUGO,
1668 },
1669 .show = show_host_vhost_loc,
1670};
1671
1672static ssize_t show_host_vhost_name(struct device *dev,
1673 struct device_attribute *attr, char *buf)
1674{
1675 struct Scsi_Host *shost = class_to_shost(dev);
1676 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1677 int len;
1678
1679 len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1680 hostdata->caps.name);
1681 return len;
1682}
1683
1684static struct device_attribute ibmvscsi_host_vhost_name = {
1685 .attr = {
1686 .name = "vhost_name",
1687 .mode = S_IRUGO,
1688 },
1689 .show = show_host_vhost_name,
1690};
1691
1474static ssize_t show_host_srp_version(struct device *dev, 1692static ssize_t show_host_srp_version(struct device *dev,
1475 struct device_attribute *attr, char *buf) 1693 struct device_attribute *attr, char *buf)
1476{ 1694{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
1594}; 1812};
1595 1813
1596static struct device_attribute *ibmvscsi_attrs[] = { 1814static struct device_attribute *ibmvscsi_attrs[] = {
1815 &ibmvscsi_host_vhost_loc,
1816 &ibmvscsi_host_vhost_name,
1597 &ibmvscsi_host_srp_version, 1817 &ibmvscsi_host_srp_version,
1598 &ibmvscsi_host_partition_name, 1818 &ibmvscsi_host_partition_name,
1599 &ibmvscsi_host_partition_number, 1819 &ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1674 atomic_set(&hostdata->request_limit, -1); 1894 atomic_set(&hostdata->request_limit, -1);
1675 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1895 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1676 1896
1897 if (map_persist_bufs(hostdata)) {
1898 dev_err(&vdev->dev, "couldn't map persistent buffers\n");
1899 goto persist_bufs_failed;
1900 }
1901
1677 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1902 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1678 if (rc != 0 && rc != H_RESOURCE) { 1903 if (rc != 0 && rc != H_RESOURCE) {
1679 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1904 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1687 host->max_lun = 8; 1912 host->max_lun = 8;
1688 host->max_id = max_id; 1913 host->max_id = max_id;
1689 host->max_channel = max_channel; 1914 host->max_channel = max_channel;
1915 host->max_cmd_len = 16;
1690 1916
1691 if (scsi_add_host(hostdata->host, hostdata->dev)) 1917 if (scsi_add_host(hostdata->host, hostdata->dev))
1692 goto add_host_failed; 1918 goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1733 init_pool_failed: 1959 init_pool_failed:
1734 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1735 init_crq_failed: 1961 init_crq_failed:
1962 unmap_persist_bufs(hostdata);
1963 persist_bufs_failed:
1736 scsi_host_put(host); 1964 scsi_host_put(host);
1737 scsi_host_alloc_failed: 1965 scsi_host_alloc_failed:
1738 return -1; 1966 return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1741static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1742{ 1970{
1743 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1972 unmap_persist_bufs(hostdata);
1744 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1745 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1746 max_events); 1975 max_events);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16e..76425303def0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90/* all driver data associated with a host adapter */ 90/* all driver data associated with a host adapter */
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated;
93 struct device *dev; 94 struct device *dev;
94 struct event_pool pool; 95 struct event_pool pool;
95 struct crq_queue queue; 96 struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
97 struct list_head sent; 98 struct list_head sent;
98 struct Scsi_Host *host; 99 struct Scsi_Host *host;
99 struct mad_adapter_info_data madapter_info; 100 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps;
102 dma_addr_t caps_addr;
103 dma_addr_t adapter_info_addr;
100}; 104};
101 105
102/* routines for managing a command/response queue */ 106/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad8..2cd735d1d196 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
37 37
38#define SRP_VERSION "16.a" 38#define SRP_VERSION "16.a"
39#define SRP_MAX_IU_LEN 256 39#define SRP_MAX_IU_LEN 256
40#define SRP_MAX_LOC_LEN 32
40 41
41union srp_iu { 42union srp_iu {
42 struct srp_login_req login_req; 43 struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
86 VIOSRP_EMPTY_IU_TYPE = 0x01, 87 VIOSRP_EMPTY_IU_TYPE = 0x01,
87 VIOSRP_ERROR_LOG_TYPE = 0x02, 88 VIOSRP_ERROR_LOG_TYPE = 0x02,
88 VIOSRP_ADAPTER_INFO_TYPE = 0x03, 89 VIOSRP_ADAPTER_INFO_TYPE = 0x03,
89 VIOSRP_HOST_CONFIG_TYPE = 0x04 90 VIOSRP_HOST_CONFIG_TYPE = 0x04,
91 VIOSRP_CAPABILITIES_TYPE = 0x05,
92 VIOSRP_ENABLE_FAST_FAIL = 0x08,
93};
94
95enum viosrp_mad_status {
96 VIOSRP_MAD_SUCCESS = 0x00,
97 VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
98 VIOSRP_MAD_FAILED = 0xF7,
99};
100
101enum viosrp_capability_type {
102 MIGRATION_CAPABILITIES = 0x01,
103 RESERVATION_CAPABILITIES = 0x02,
104};
105
106enum viosrp_capability_support {
107 SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
108 SERVER_SUPPORTS_CAP = 0x01,
109 SERVER_CAP_DATA = 0x02,
110};
111
112enum viosrp_reserve_type {
113 CLIENT_RESERVE_SCSI_2 = 0x01,
114};
115
116enum viosrp_capability_flag {
117 CLIENT_MIGRATED = 0x01,
118 CLIENT_RECONNECT = 0x02,
119 CAP_LIST_SUPPORTED = 0x04,
120 CAP_LIST_DATA = 0x08,
90}; 121};
91 122
92/* 123/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
127 u64 buffer; 158 u64 buffer;
128}; 159};
129 160
161struct viosrp_fast_fail {
162 struct mad_common common;
163};
164
165struct viosrp_capabilities {
166 struct mad_common common;
167 u64 buffer;
168};
169
170struct mad_capability_common {
171 u32 cap_type;
172 u16 length;
173 u16 server_support;
174};
175
176struct mad_reserve_cap {
177 struct mad_capability_common common;
178 u32 type;
179};
180
181struct mad_migration_cap {
182 struct mad_capability_common common;
183 u32 ecl;
184};
185
186struct capabilities{
187 u32 flags;
188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration;
191 struct mad_reserve_cap reserve;
192};
193
130union mad_iu { 194union mad_iu {
131 struct viosrp_empty_iu empty_iu; 195 struct viosrp_empty_iu empty_iu;
132 struct viosrp_error_log error_log; 196 struct viosrp_error_log error_log;
133 struct viosrp_adapter_info adapter_info; 197 struct viosrp_adapter_info adapter_info;
134 struct viosrp_host_config host_config; 198 struct viosrp_host_config host_config;
199 struct viosrp_fast_fail fast_fail;
200 struct viosrp_capabilities capabilities;
135}; 201};
136 202
137union viosrp_iu { 203union viosrp_iu {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded8609..0f8bc772b112 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
7003 ioa_cfg->sdt_state = ABORT_DUMP; 7003 ioa_cfg->sdt_state = ABORT_DUMP;
7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7005 ioa_cfg->in_ioa_bringdown = 1; 7005 ioa_cfg->in_ioa_bringdown = 1;
7006 ioa_cfg->allow_cmds = 0;
7006 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7007 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7008} 7009}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
7688 * Return value: 7689 * Return value:
7689 * none 7690 * none
7690 **/ 7691 **/
7691static void ipr_remove(struct pci_dev *pdev) 7692static void __devexit ipr_remove(struct pci_dev *pdev)
7692{ 7693{
7693 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7694 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7694 7695
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
7864 .name = IPR_NAME, 7865 .name = IPR_NAME,
7865 .id_table = ipr_pci_table, 7866 .id_table = ipr_pci_table,
7866 .probe = ipr_probe, 7867 .probe = ipr_probe,
7867 .remove = ipr_remove, 7868 .remove = __devexit_p(ipr_remove),
7868 .shutdown = ipr_shutdown, 7869 .shutdown = ipr_shutdown,
7869 .err_handler = &ipr_err_handler, 7870 .err_handler = &ipr_err_handler,
7870}; 7871};
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf1..7af9bceb8aa9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1159 atomic_inc(&mp->stats.xid_not_found); 1159 atomic_inc(&mp->stats.xid_not_found);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 if (ep->esb_stat & ESB_ST_COMPLETE) {
1163 atomic_inc(&mp->stats.xid_not_found);
1164 goto out;
1165 }
1162 if (ep->rxid == FC_XID_UNKNOWN) 1166 if (ep->rxid == FC_XID_UNKNOWN)
1163 ep->rxid = ntohs(fh->fh_rx_id); 1167 ep->rxid = ntohs(fh->fh_rx_id);
1164 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { 1168 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b13..ad8b747837b0 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1897 break; 1897 break;
1898 case FC_CMD_ABORTED: 1898 case FC_CMD_ABORTED:
1899 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1899 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1900 break; 1900 break;
1901 case FC_CMD_TIME_OUT: 1901 case FC_CMD_TIME_OUT:
1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8af..7bfbff7e0efb 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
478 if (PTR_ERR(fp) == -FC_EX_CLOSED) 478 if (PTR_ERR(fp) == -FC_EX_CLOSED)
479 return fc_rport_error(rport, fp); 479 return fc_rport_error(rport, fp);
480 480
481 if (rdata->retries < rdata->local_port->max_retry_count) { 481 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", 482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
483 PTR_ERR(fp), fc_rport_state(rport)); 483 PTR_ERR(fp), fc_rport_state(rport));
484 rdata->retries++; 484 rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
1330} 1330}
1331EXPORT_SYMBOL(fc_rport_init); 1331EXPORT_SYMBOL(fc_rport_init);
1332 1332
1333int fc_setup_rport() 1333int fc_setup_rport(void)
1334{ 1334{
1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1336 if (!rport_event_queue) 1336 if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
1339} 1339}
1340EXPORT_SYMBOL(fc_setup_rport); 1340EXPORT_SYMBOL(fc_setup_rport);
1341 1341
1342void fc_destroy_rport() 1342void fc_destroy_rport(void)
1343{ 1343{
1344 destroy_workqueue(rport_event_queue); 1344 destroy_workqueue(rport_event_queue);
1345} 1345}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d35..59908aead531 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
81 struct Scsi_Host *shost = conn->session->host; 81 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost); 82 struct iscsi_host *ihost = shost_priv(shost);
83 83
84 queue_work(ihost->workq, &conn->xmitwork); 84 if (ihost->workq)
85 queue_work(ihost->workq, &conn->xmitwork);
85} 86}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 87EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87 88
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
109 * if the window closed with IO queued, then kick the 110 * if the window closed with IO queued, then kick the
110 * xmit thread 111 * xmit thread
111 */ 112 */
112 if (!list_empty(&session->leadconn->xmitqueue) || 113 if (!list_empty(&session->leadconn->cmdqueue) ||
113 !list_empty(&session->leadconn->mgmtqueue)) { 114 !list_empty(&session->leadconn->mgmtqueue))
114 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 115 iscsi_conn_queue_work(session->leadconn);
115 iscsi_conn_queue_work(session->leadconn);
116 }
117 } 116 }
118} 117}
119EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 118EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
257 itt_t itt; 256 itt_t itt;
258 int rc; 257 int rc;
259 258
260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); 259 if (conn->session->tt->alloc_pdu) {
261 if (rc) 260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
262 return rc; 261 if (rc)
262 return rc;
263 }
263 hdr = (struct iscsi_cmd *) task->hdr; 264 hdr = (struct iscsi_cmd *) task->hdr;
264 itt = hdr->itt; 265 itt = hdr->itt;
265 memset(hdr, 0, sizeof(*hdr)); 266 memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
364 return -EIO; 365 return -EIO;
365 366
366 task->state = ISCSI_TASK_RUNNING; 367 task->state = ISCSI_TASK_RUNNING;
367 list_move_tail(&task->running, &conn->run_list);
368 368
369 conn->scsicmd_pdus_cnt++; 369 conn->scsicmd_pdus_cnt++;
370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
380} 380}
381 381
382/** 382/**
383 * iscsi_complete_command - finish a task 383 * iscsi_free_task - free a task
384 * @task: iscsi cmd task 384 * @task: iscsi cmd task
385 * 385 *
386 * Must be called with session lock. 386 * Must be called with session lock.
387 * This function returns the scsi command to scsi-ml or cleans 387 * This function returns the scsi command to scsi-ml or cleans
388 * up mgmt tasks then returns the task to the pool. 388 * up mgmt tasks then returns the task to the pool.
389 */ 389 */
390static void iscsi_complete_command(struct iscsi_task *task) 390static void iscsi_free_task(struct iscsi_task *task)
391{ 391{
392 struct iscsi_conn *conn = task->conn; 392 struct iscsi_conn *conn = task->conn;
393 struct iscsi_session *session = conn->session; 393 struct iscsi_session *session = conn->session;
394 struct scsi_cmnd *sc = task->sc; 394 struct scsi_cmnd *sc = task->sc;
395 395
396 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
397 task->itt, task->state, task->sc);
398
396 session->tt->cleanup_task(task); 399 session->tt->cleanup_task(task);
397 list_del_init(&task->running); 400 task->state = ISCSI_TASK_FREE;
398 task->state = ISCSI_TASK_COMPLETED;
399 task->sc = NULL; 401 task->sc = NULL;
400
401 if (conn->task == task)
402 conn->task = NULL;
403 /* 402 /*
404 * login task is preallocated so do not free 403 * login task is preallocated so do not free
405 */ 404 */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
408 407
409 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 408 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
410 409
411 if (conn->ping_task == task)
412 conn->ping_task = NULL;
413
414 if (sc) { 410 if (sc) {
415 task->sc = NULL; 411 task->sc = NULL;
416 /* SCSI eh reuses commands to verify us */ 412 /* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
433static void __iscsi_put_task(struct iscsi_task *task) 429static void __iscsi_put_task(struct iscsi_task *task)
434{ 430{
435 if (atomic_dec_and_test(&task->refcount)) 431 if (atomic_dec_and_test(&task->refcount))
436 iscsi_complete_command(task); 432 iscsi_free_task(task);
437} 433}
438 434
439void iscsi_put_task(struct iscsi_task *task) 435void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
446} 442}
447EXPORT_SYMBOL_GPL(iscsi_put_task); 443EXPORT_SYMBOL_GPL(iscsi_put_task);
448 444
445/**
446 * iscsi_complete_task - finish a task
447 * @task: iscsi cmd task
448 * @state: state to complete task with
449 *
450 * Must be called with session lock.
451 */
452static void iscsi_complete_task(struct iscsi_task *task, int state)
453{
454 struct iscsi_conn *conn = task->conn;
455
456 ISCSI_DBG_SESSION(conn->session,
457 "complete task itt 0x%x state %d sc %p\n",
458 task->itt, task->state, task->sc);
459 if (task->state == ISCSI_TASK_COMPLETED ||
460 task->state == ISCSI_TASK_ABRT_TMF ||
461 task->state == ISCSI_TASK_ABRT_SESS_RECOV)
462 return;
463 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
464 task->state = state;
465
466 if (!list_empty(&task->running))
467 list_del_init(&task->running);
468
469 if (conn->task == task)
470 conn->task = NULL;
471
472 if (conn->ping_task == task)
473 conn->ping_task = NULL;
474
475 /* release get from queueing */
476 __iscsi_put_task(task);
477}
478
449/* 479/*
450 * session lock must be held 480 * session lock must be held and if not called for a task that is
481 * still pending or from the xmit thread, then xmit thread must
482 * be suspended.
451 */ 483 */
452static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, 484static void fail_scsi_task(struct iscsi_task *task, int err)
453 int err)
454{ 485{
486 struct iscsi_conn *conn = task->conn;
455 struct scsi_cmnd *sc; 487 struct scsi_cmnd *sc;
488 int state;
456 489
490 /*
491 * if a command completes and we get a successful tmf response
492 * we will hit this because the scsi eh abort code does not take
493 * a ref to the task.
494 */
457 sc = task->sc; 495 sc = task->sc;
458 if (!sc) 496 if (!sc)
459 return; 497 return;
460 498
461 if (task->state == ISCSI_TASK_PENDING) 499 if (task->state == ISCSI_TASK_PENDING) {
462 /* 500 /*
463 * cmd never made it to the xmit thread, so we should not count 501 * cmd never made it to the xmit thread, so we should not count
464 * the cmd in the sequencing 502 * the cmd in the sequencing
465 */ 503 */
466 conn->session->queued_cmdsn--; 504 conn->session->queued_cmdsn--;
505 /* it was never sent so just complete like normal */
506 state = ISCSI_TASK_COMPLETED;
507 } else if (err == DID_TRANSPORT_DISRUPTED)
508 state = ISCSI_TASK_ABRT_SESS_RECOV;
509 else
510 state = ISCSI_TASK_ABRT_TMF;
467 511
468 sc->result = err; 512 sc->result = err << 16;
469 if (!scsi_bidi_cmnd(sc)) 513 if (!scsi_bidi_cmnd(sc))
470 scsi_set_resid(sc, scsi_bufflen(sc)); 514 scsi_set_resid(sc, scsi_bufflen(sc));
471 else { 515 else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
473 scsi_in(sc)->resid = scsi_in(sc)->length; 517 scsi_in(sc)->resid = scsi_in(sc)->length;
474 } 518 }
475 519
476 if (conn->task == task) 520 iscsi_complete_task(task, state);
477 conn->task = NULL;
478 /* release ref from queuecommand */
479 __iscsi_put_task(task);
480} 521}
481 522
482static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 523static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
516 session->state = ISCSI_STATE_LOGGING_OUT; 557 session->state = ISCSI_STATE_LOGGING_OUT;
517 558
518 task->state = ISCSI_TASK_RUNNING; 559 task->state = ISCSI_TASK_RUNNING;
519 list_move_tail(&task->running, &conn->mgmt_run_list);
520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " 560 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, 561 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
522 hdr->itt, task->data_count); 562 hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
528 char *data, uint32_t data_size) 568 char *data, uint32_t data_size)
529{ 569{
530 struct iscsi_session *session = conn->session; 570 struct iscsi_session *session = conn->session;
571 struct iscsi_host *ihost = shost_priv(session->host);
531 struct iscsi_task *task; 572 struct iscsi_task *task;
532 itt_t itt; 573 itt_t itt;
533 574
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
544 */ 585 */
545 task = conn->login_task; 586 task = conn->login_task;
546 else { 587 else {
588 if (session->state != ISCSI_STATE_LOGGED_IN)
589 return NULL;
590
547 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 591 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
548 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 592 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
549 593
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
559 atomic_set(&task->refcount, 1); 603 atomic_set(&task->refcount, 1);
560 task->conn = conn; 604 task->conn = conn;
561 task->sc = NULL; 605 task->sc = NULL;
606 INIT_LIST_HEAD(&task->running);
607 task->state = ISCSI_TASK_PENDING;
562 608
563 if (data_size) { 609 if (data_size) {
564 memcpy(task->data, data, data_size); 610 memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
566 } else 612 } else
567 task->data_count = 0; 613 task->data_count = 0;
568 614
569 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { 615 if (conn->session->tt->alloc_pdu) {
570 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " 616 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
571 "pdu for mgmt task.\n"); 617 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
572 goto requeue_task; 618 "pdu for mgmt task.\n");
619 goto free_task;
620 }
573 } 621 }
622
574 itt = task->hdr->itt; 623 itt = task->hdr->itt;
575 task->hdr_len = sizeof(struct iscsi_hdr); 624 task->hdr_len = sizeof(struct iscsi_hdr);
576 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 625 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
583 task->conn->session->age); 632 task->conn->session->age);
584 } 633 }
585 634
586 INIT_LIST_HEAD(&task->running); 635 if (!ihost->workq) {
587 list_add_tail(&task->running, &conn->mgmtqueue);
588
589 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
590 if (iscsi_prep_mgmt_task(conn, task)) 636 if (iscsi_prep_mgmt_task(conn, task))
591 goto free_task; 637 goto free_task;
592 638
593 if (session->tt->xmit_task(task)) 639 if (session->tt->xmit_task(task))
594 goto free_task; 640 goto free_task;
595 641 } else {
596 } else 642 list_add_tail(&task->running, &conn->mgmtqueue);
597 iscsi_conn_queue_work(conn); 643 iscsi_conn_queue_work(conn);
644 }
598 645
599 return task; 646 return task;
600 647
601free_task: 648free_task:
602 __iscsi_put_task(task); 649 __iscsi_put_task(task);
603 return NULL; 650 return NULL;
604
605requeue_task:
606 if (task != conn->login_task)
607 __kfifo_put(session->cmdpool.queue, (void*)&task,
608 sizeof(void*));
609 return NULL;
610} 651}
611 652
612int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 653int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 742 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
702 } 743 }
703out: 744out:
704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", 745 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
705 sc, sc->result, task->itt); 746 sc, sc->result, task->itt);
706 conn->scsirsp_pdus_cnt++; 747 conn->scsirsp_pdus_cnt++;
707 748 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
708 __iscsi_put_task(task);
709} 749}
710 750
711/** 751/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
724 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 764 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
725 return; 765 return;
726 766
767 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
727 sc->result = (DID_OK << 16) | rhdr->cmd_status; 768 sc->result = (DID_OK << 16) | rhdr->cmd_status;
728 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 769 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
729 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 770 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
738 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 779 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
739 } 780 }
740 781
782 ISCSI_DBG_SESSION(conn->session, "data in with status done "
783 "[sc %p res %d itt 0x%x]\n",
784 sc, sc->result, task->itt);
741 conn->scsirsp_pdus_cnt++; 785 conn->scsirsp_pdus_cnt++;
742 __iscsi_put_task(task); 786 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
743} 787}
744 788
745static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 789static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
823 * 867 *
824 * The session lock must be held. 868 * The session lock must be held.
825 */ 869 */
826static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 870struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
827{ 871{
828 struct iscsi_session *session = conn->session; 872 struct iscsi_session *session = conn->session;
829 int i; 873 int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
840 884
841 return session->cmds[i]; 885 return session->cmds[i];
842} 886}
887EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
843 888
844/** 889/**
845 * __iscsi_complete_pdu - complete pdu 890 * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
959 } 1004 }
960 1005
961 iscsi_tmf_rsp(conn, hdr); 1006 iscsi_tmf_rsp(conn, hdr);
962 __iscsi_put_task(task); 1007 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
963 break; 1008 break;
964 case ISCSI_OP_NOOP_IN: 1009 case ISCSI_OP_NOOP_IN:
965 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 1010 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
977 goto recv_pdu; 1022 goto recv_pdu;
978 1023
979 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 1024 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
980 __iscsi_put_task(task); 1025 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
981 break; 1026 break;
982 default: 1027 default:
983 rc = ISCSI_ERR_BAD_OPCODE; 1028 rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
989recv_pdu: 1034recv_pdu:
990 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 1035 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
991 rc = ISCSI_ERR_CONN_FAILED; 1036 rc = ISCSI_ERR_CONN_FAILED;
992 __iscsi_put_task(task); 1037 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
993 return rc; 1038 return rc;
994} 1039}
995EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 1040EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
1166{ 1211{
1167 struct iscsi_conn *conn = task->conn; 1212 struct iscsi_conn *conn = task->conn;
1168 1213
1169 list_move_tail(&task->running, &conn->requeue); 1214 /*
1215 * this may be on the requeue list already if the xmit_task callout
1216 * is handling the r2ts while we are adding new ones
1217 */
1218 if (list_empty(&task->running))
1219 list_add_tail(&task->running, &conn->requeue);
1170 iscsi_conn_queue_work(conn); 1220 iscsi_conn_queue_work(conn);
1171} 1221}
1172EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1222EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
1206 while (!list_empty(&conn->mgmtqueue)) { 1256 while (!list_empty(&conn->mgmtqueue)) {
1207 conn->task = list_entry(conn->mgmtqueue.next, 1257 conn->task = list_entry(conn->mgmtqueue.next,
1208 struct iscsi_task, running); 1258 struct iscsi_task, running);
1259 list_del_init(&conn->task->running);
1209 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1260 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1210 __iscsi_put_task(conn->task); 1261 __iscsi_put_task(conn->task);
1211 conn->task = NULL; 1262 conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
1217 } 1268 }
1218 1269
1219 /* process pending command queue */ 1270 /* process pending command queue */
1220 while (!list_empty(&conn->xmitqueue)) { 1271 while (!list_empty(&conn->cmdqueue)) {
1221 if (conn->tmf_state == TMF_QUEUED) 1272 if (conn->tmf_state == TMF_QUEUED)
1222 break; 1273 break;
1223 1274
1224 conn->task = list_entry(conn->xmitqueue.next, 1275 conn->task = list_entry(conn->cmdqueue.next,
1225 struct iscsi_task, running); 1276 struct iscsi_task, running);
1277 list_del_init(&conn->task->running);
1226 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1278 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1227 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1279 fail_scsi_task(conn->task, DID_IMM_RETRY);
1228 continue; 1280 continue;
1229 } 1281 }
1230 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1282 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1231 if (rc) { 1283 if (rc) {
1232 if (rc == -ENOMEM) { 1284 if (rc == -ENOMEM) {
1285 list_add_tail(&conn->task->running,
1286 &conn->cmdqueue);
1233 conn->task = NULL; 1287 conn->task = NULL;
1234 goto again; 1288 goto again;
1235 } else 1289 } else
1236 fail_command(conn, conn->task, DID_ABORT << 16); 1290 fail_scsi_task(conn->task, DID_ABORT);
1237 continue; 1291 continue;
1238 } 1292 }
1239 rc = iscsi_xmit_task(conn); 1293 rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
1260 1314
1261 conn->task = list_entry(conn->requeue.next, 1315 conn->task = list_entry(conn->requeue.next,
1262 struct iscsi_task, running); 1316 struct iscsi_task, running);
1317 list_del_init(&conn->task->running);
1263 conn->task->state = ISCSI_TASK_RUNNING; 1318 conn->task->state = ISCSI_TASK_RUNNING;
1264 list_move_tail(conn->requeue.next, &conn->run_list);
1265 rc = iscsi_xmit_task(conn); 1319 rc = iscsi_xmit_task(conn);
1266 if (rc) 1320 if (rc)
1267 goto again; 1321 goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1328{ 1382{
1329 struct iscsi_cls_session *cls_session; 1383 struct iscsi_cls_session *cls_session;
1330 struct Scsi_Host *host; 1384 struct Scsi_Host *host;
1385 struct iscsi_host *ihost;
1331 int reason = 0; 1386 int reason = 0;
1332 struct iscsi_session *session; 1387 struct iscsi_session *session;
1333 struct iscsi_conn *conn; 1388 struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1338 sc->SCp.ptr = NULL; 1393 sc->SCp.ptr = NULL;
1339 1394
1340 host = sc->device->host; 1395 host = sc->device->host;
1396 ihost = shost_priv(host);
1341 spin_unlock(host->host_lock); 1397 spin_unlock(host->host_lock);
1342 1398
1343 cls_session = starget_to_session(scsi_target(sc->device)); 1399 cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1350 goto fault; 1406 goto fault;
1351 } 1407 }
1352 1408
1353 /* 1409 if (session->state != ISCSI_STATE_LOGGED_IN) {
1354 * ISCSI_STATE_FAILED is a temp. state. The recovery
1355 * code will decide what is best to do with command queued
1356 * during this time
1357 */
1358 if (session->state != ISCSI_STATE_LOGGED_IN &&
1359 session->state != ISCSI_STATE_FAILED) {
1360 /* 1410 /*
1361 * to handle the race between when we set the recovery state 1411 * to handle the race between when we set the recovery state
1362 * and block the session we requeue here (commands could 1412 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1364 * up because the block code is not locked) 1414 * up because the block code is not locked)
1365 */ 1415 */
1366 switch (session->state) { 1416 switch (session->state) {
1417 case ISCSI_STATE_FAILED:
1367 case ISCSI_STATE_IN_RECOVERY: 1418 case ISCSI_STATE_IN_RECOVERY:
1368 reason = FAILURE_SESSION_IN_RECOVERY; 1419 reason = FAILURE_SESSION_IN_RECOVERY;
1369 goto reject; 1420 sc->result = DID_IMM_RETRY << 16;
1421 break;
1370 case ISCSI_STATE_LOGGING_OUT: 1422 case ISCSI_STATE_LOGGING_OUT:
1371 reason = FAILURE_SESSION_LOGGING_OUT; 1423 reason = FAILURE_SESSION_LOGGING_OUT;
1372 goto reject; 1424 sc->result = DID_IMM_RETRY << 16;
1425 break;
1373 case ISCSI_STATE_RECOVERY_FAILED: 1426 case ISCSI_STATE_RECOVERY_FAILED:
1374 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1427 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1375 sc->result = DID_TRANSPORT_FAILFAST << 16; 1428 sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1402 reason = FAILURE_OOM; 1455 reason = FAILURE_OOM;
1403 goto reject; 1456 goto reject;
1404 } 1457 }
1405 list_add_tail(&task->running, &conn->xmitqueue);
1406 1458
1407 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1459 if (!ihost->workq) {
1408 reason = iscsi_prep_scsi_cmd_pdu(task); 1460 reason = iscsi_prep_scsi_cmd_pdu(task);
1409 if (reason) { 1461 if (reason) {
1410 if (reason == -ENOMEM) { 1462 if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1419 reason = FAILURE_SESSION_NOT_READY; 1471 reason = FAILURE_SESSION_NOT_READY;
1420 goto prepd_reject; 1472 goto prepd_reject;
1421 } 1473 }
1422 } else 1474 } else {
1475 list_add_tail(&task->running, &conn->cmdqueue);
1423 iscsi_conn_queue_work(conn); 1476 iscsi_conn_queue_work(conn);
1477 }
1424 1478
1425 session->queued_cmdsn++; 1479 session->queued_cmdsn++;
1426 spin_unlock(&session->lock); 1480 spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1429 1483
1430prepd_reject: 1484prepd_reject:
1431 sc->scsi_done = NULL; 1485 sc->scsi_done = NULL;
1432 iscsi_complete_command(task); 1486 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1433reject: 1487reject:
1434 spin_unlock(&session->lock); 1488 spin_unlock(&session->lock);
1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1489 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
1439 1493
1440prepd_fault: 1494prepd_fault:
1441 sc->scsi_done = NULL; 1495 sc->scsi_done = NULL;
1442 iscsi_complete_command(task); 1496 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1443fault: 1497fault:
1444 spin_unlock(&session->lock); 1498 spin_unlock(&session->lock);
1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1499 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1608 * Fail commands. session lock held and recv side suspended and xmit 1662 * Fail commands. session lock held and recv side suspended and xmit
1609 * thread flushed 1663 * thread flushed
1610 */ 1664 */
1611static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1665static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1612 int error) 1666 int error)
1613{ 1667{
1614 struct iscsi_task *task, *tmp; 1668 struct iscsi_task *task;
1615 1669 int i;
1616 if (conn->task) {
1617 if (lun == -1 ||
1618 (conn->task->sc && conn->task->sc->device->lun == lun))
1619 conn->task = NULL;
1620 }
1621 1670
1622 /* flush pending */ 1671 for (i = 0; i < conn->session->cmds_max; i++) {
1623 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1672 task = conn->session->cmds[i];
1624 if (lun == task->sc->device->lun || lun == -1) { 1673 if (!task->sc || task->state == ISCSI_TASK_FREE)
1625 ISCSI_DBG_SESSION(conn->session, 1674 continue;
1626 "failing pending sc %p itt 0x%x\n",
1627 task->sc, task->itt);
1628 fail_command(conn, task, error << 16);
1629 }
1630 }
1631 1675
1632 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1676 if (lun != -1 && lun != task->sc->device->lun)
1633 if (lun == task->sc->device->lun || lun == -1) { 1677 continue;
1634 ISCSI_DBG_SESSION(conn->session,
1635 "failing requeued sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1637 fail_command(conn, task, error << 16);
1638 }
1639 }
1640 1678
1641 /* fail all other running */ 1679 ISCSI_DBG_SESSION(conn->session,
1642 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1680 "failing sc %p itt 0x%x state %d\n",
1643 if (lun == task->sc->device->lun || lun == -1) { 1681 task->sc, task->itt, task->state);
1644 ISCSI_DBG_SESSION(conn->session, 1682 fail_scsi_task(task, error);
1645 "failing in progress sc %p itt 0x%x\n",
1646 task->sc, task->itt);
1647 fail_command(conn, task, error << 16);
1648 }
1649 } 1683 }
1650} 1684}
1651 1685
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1655 struct iscsi_host *ihost = shost_priv(shost); 1689 struct iscsi_host *ihost = shost_priv(shost);
1656 1690
1657 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1691 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1658 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1692 if (ihost->workq)
1659 flush_workqueue(ihost->workq); 1693 flush_workqueue(ihost->workq);
1660} 1694}
1661EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1695EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1663static void iscsi_start_tx(struct iscsi_conn *conn) 1697static void iscsi_start_tx(struct iscsi_conn *conn)
1664{ 1698{
1665 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1699 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1666 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1700 iscsi_conn_queue_work(conn);
1667 iscsi_conn_queue_work(conn); 1701}
1702
1703/*
1704 * We want to make sure a ping is in flight. It has timed out.
1705 * And we are not busy processing a pdu that is making
1706 * progress but got started before the ping and is taking a while
1707 * to complete so the ping is just stuck behind it in a queue.
1708 */
1709static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1710{
1711 if (conn->ping_task &&
1712 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1713 (conn->ping_timeout * HZ), jiffies))
1714 return 1;
1715 else
1716 return 0;
1668} 1717}
1669 1718
1670static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1719static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1702 * if the ping timedout then we are in the middle of cleaning up 1751 * if the ping timedout then we are in the middle of cleaning up
1703 * and can let the iscsi eh handle it 1752 * and can let the iscsi eh handle it
1704 */ 1753 */
1705 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1754 if (iscsi_has_ping_timed_out(conn)) {
1706 (conn->ping_timeout * HZ), jiffies))
1707 rc = BLK_EH_RESET_TIMER; 1755 rc = BLK_EH_RESET_TIMER;
1756 goto done;
1757 }
1708 /* 1758 /*
1709 * if we are about to check the transport then give the command 1759 * if we are about to check the transport then give the command
1710 * more time 1760 * more time
1711 */ 1761 */
1712 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1762 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1713 jiffies)) 1763 jiffies)) {
1714 rc = BLK_EH_RESET_TIMER; 1764 rc = BLK_EH_RESET_TIMER;
1765 goto done;
1766 }
1767
1715 /* if in the middle of checking the transport then give us more time */ 1768 /* if in the middle of checking the transport then give us more time */
1716 if (conn->ping_task) 1769 if (conn->ping_task)
1717 rc = BLK_EH_RESET_TIMER; 1770 rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1738 1791
1739 recv_timeout *= HZ; 1792 recv_timeout *= HZ;
1740 last_recv = conn->last_recv; 1793 last_recv = conn->last_recv;
1741 if (conn->ping_task && 1794
1742 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1795 if (iscsi_has_ping_timed_out(conn)) {
1743 jiffies)) {
1744 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1796 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1745 "expired, last rx %lu, last ping %lu, " 1797 "expired, recv timeout %d, last rx %lu, "
1746 "now %lu\n", conn->ping_timeout, last_recv, 1798 "last ping %lu, now %lu\n",
1747 conn->last_ping, jiffies); 1799 conn->ping_timeout, conn->recv_timeout,
1800 last_recv, conn->last_ping, jiffies);
1748 spin_unlock(&session->lock); 1801 spin_unlock(&session->lock);
1749 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1802 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1750 return; 1803 return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1788 cls_session = starget_to_session(scsi_target(sc->device)); 1841 cls_session = starget_to_session(scsi_target(sc->device));
1789 session = cls_session->dd_data; 1842 session = cls_session->dd_data;
1790 1843
1844 ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
1845
1791 mutex_lock(&session->eh_mutex); 1846 mutex_lock(&session->eh_mutex);
1792 spin_lock_bh(&session->lock); 1847 spin_lock_bh(&session->lock);
1793 /* 1848 /*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1810 sc->SCp.phase != session->age) { 1865 sc->SCp.phase != session->age) {
1811 spin_unlock_bh(&session->lock); 1866 spin_unlock_bh(&session->lock);
1812 mutex_unlock(&session->eh_mutex); 1867 mutex_unlock(&session->eh_mutex);
1868 ISCSI_DBG_SESSION(session, "failing abort due to dropped "
1869 "session.\n");
1813 return FAILED; 1870 return FAILED;
1814 } 1871 }
1815 1872
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1829 } 1886 }
1830 1887
1831 if (task->state == ISCSI_TASK_PENDING) { 1888 if (task->state == ISCSI_TASK_PENDING) {
1832 fail_command(conn, task, DID_ABORT << 16); 1889 fail_scsi_task(task, DID_ABORT);
1833 goto success; 1890 goto success;
1834 } 1891 }
1835 1892
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1860 * then sent more data for the cmd. 1917 * then sent more data for the cmd.
1861 */ 1918 */
1862 spin_lock(&session->lock); 1919 spin_lock(&session->lock);
1863 fail_command(conn, task, DID_ABORT << 16); 1920 fail_scsi_task(task, DID_ABORT);
1864 conn->tmf_state = TMF_INITIAL; 1921 conn->tmf_state = TMF_INITIAL;
1865 spin_unlock(&session->lock); 1922 spin_unlock(&session->lock);
1866 iscsi_start_tx(conn); 1923 iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1967 iscsi_suspend_tx(conn); 2024 iscsi_suspend_tx(conn);
1968 2025
1969 spin_lock_bh(&session->lock); 2026 spin_lock_bh(&session->lock);
1970 fail_all_commands(conn, sc->device->lun, DID_ERROR); 2027 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
1971 conn->tmf_state = TMF_INITIAL; 2028 conn->tmf_state = TMF_INITIAL;
1972 spin_unlock_bh(&session->lock); 2029 spin_unlock_bh(&session->lock);
1973 2030
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2274 if (cmd_task_size) 2331 if (cmd_task_size)
2275 task->dd_data = &task[1]; 2332 task->dd_data = &task[1];
2276 task->itt = cmd_i; 2333 task->itt = cmd_i;
2334 task->state = ISCSI_TASK_FREE;
2277 INIT_LIST_HEAD(&task->running); 2335 INIT_LIST_HEAD(&task->running);
2278 } 2336 }
2279 2337
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2360 conn->transport_timer.data = (unsigned long)conn; 2418 conn->transport_timer.data = (unsigned long)conn;
2361 conn->transport_timer.function = iscsi_check_transport_timeouts; 2419 conn->transport_timer.function = iscsi_check_transport_timeouts;
2362 2420
2363 INIT_LIST_HEAD(&conn->run_list);
2364 INIT_LIST_HEAD(&conn->mgmt_run_list);
2365 INIT_LIST_HEAD(&conn->mgmtqueue); 2421 INIT_LIST_HEAD(&conn->mgmtqueue);
2366 INIT_LIST_HEAD(&conn->xmitqueue); 2422 INIT_LIST_HEAD(&conn->cmdqueue);
2367 INIT_LIST_HEAD(&conn->requeue); 2423 INIT_LIST_HEAD(&conn->requeue);
2368 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2424 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2369 2425
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2531EXPORT_SYMBOL_GPL(iscsi_conn_start); 2587EXPORT_SYMBOL_GPL(iscsi_conn_start);
2532 2588
2533static void 2589static void
2534flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2590fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
2535{ 2591{
2536 struct iscsi_task *task, *tmp; 2592 struct iscsi_task *task;
2593 int i, state;
2537 2594
2538 /* handle pending */ 2595 for (i = 0; i < conn->session->cmds_max; i++) {
2539 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2596 task = conn->session->cmds[i];
2540 ISCSI_DBG_SESSION(session, "flushing pending mgmt task " 2597 if (task->sc)
2541 "itt 0x%x\n", task->itt); 2598 continue;
2542 /* release ref from prep task */
2543 __iscsi_put_task(task);
2544 }
2545 2599
2546 /* handle running */ 2600 if (task->state == ISCSI_TASK_FREE)
2547 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2601 continue;
2548 ISCSI_DBG_SESSION(session, "flushing running mgmt task " 2602
2549 "itt 0x%x\n", task->itt); 2603 ISCSI_DBG_SESSION(conn->session,
2550 /* release ref from prep task */ 2604 "failing mgmt itt 0x%x state %d\n",
2551 __iscsi_put_task(task); 2605 task->itt, task->state);
2552 } 2606 state = ISCSI_TASK_ABRT_SESS_RECOV;
2607 if (task->state == ISCSI_TASK_PENDING)
2608 state = ISCSI_TASK_COMPLETED;
2609 iscsi_complete_task(task, state);
2553 2610
2554 conn->task = NULL; 2611 }
2555} 2612}
2556 2613
2557static void iscsi_start_session_recovery(struct iscsi_session *session, 2614static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2559{ 2616{
2560 int old_stop_stage; 2617 int old_stop_stage;
2561 2618
2562 del_timer_sync(&conn->transport_timer);
2563
2564 mutex_lock(&session->eh_mutex); 2619 mutex_lock(&session->eh_mutex);
2565 spin_lock_bh(&session->lock); 2620 spin_lock_bh(&session->lock);
2566 if (conn->stop_stage == STOP_CONN_TERM) { 2621 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2578 session->state = ISCSI_STATE_TERMINATE; 2633 session->state = ISCSI_STATE_TERMINATE;
2579 else if (conn->stop_stage != STOP_CONN_RECOVER) 2634 else if (conn->stop_stage != STOP_CONN_RECOVER)
2580 session->state = ISCSI_STATE_IN_RECOVERY; 2635 session->state = ISCSI_STATE_IN_RECOVERY;
2636 spin_unlock_bh(&session->lock);
2637
2638 del_timer_sync(&conn->transport_timer);
2639 iscsi_suspend_tx(conn);
2581 2640
2641 spin_lock_bh(&session->lock);
2582 old_stop_stage = conn->stop_stage; 2642 old_stop_stage = conn->stop_stage;
2583 conn->stop_stage = flag; 2643 conn->stop_stage = flag;
2584 conn->c_stage = ISCSI_CONN_STOPPED; 2644 conn->c_stage = ISCSI_CONN_STOPPED;
2585 spin_unlock_bh(&session->lock); 2645 spin_unlock_bh(&session->lock);
2586 2646
2587 iscsi_suspend_tx(conn);
2588 /* 2647 /*
2589 * for connection level recovery we should not calculate 2648 * for connection level recovery we should not calculate
2590 * header digest. conn->hdr_size used for optimization 2649 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2605 * flush queues. 2664 * flush queues.
2606 */ 2665 */
2607 spin_lock_bh(&session->lock); 2666 spin_lock_bh(&session->lock);
2608 if (flag == STOP_CONN_RECOVER) 2667 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2609 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2668 fail_mgmt_tasks(session, conn);
2610 else
2611 fail_all_commands(conn, -1, DID_ERROR);
2612 flush_control_queues(session, conn);
2613 spin_unlock_bh(&session->lock); 2669 spin_unlock_bh(&session->lock);
2614 mutex_unlock(&session->eh_mutex); 2670 mutex_unlock(&session->eh_mutex);
2615} 2671}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2651} 2707}
2652EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2708EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2653 2709
2710static int iscsi_switch_str_param(char **param, char *new_val_buf)
2711{
2712 char *new_val;
2713
2714 if (*param) {
2715 if (!strcmp(*param, new_val_buf))
2716 return 0;
2717 }
2718
2719 new_val = kstrdup(new_val_buf, GFP_NOIO);
2720 if (!new_val)
2721 return -ENOMEM;
2722
2723 kfree(*param);
2724 *param = new_val;
2725 return 0;
2726}
2654 2727
2655int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2728int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2656 enum iscsi_param param, char *buf, int buflen) 2729 enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2723 sscanf(buf, "%u", &conn->exp_statsn); 2796 sscanf(buf, "%u", &conn->exp_statsn);
2724 break; 2797 break;
2725 case ISCSI_PARAM_USERNAME: 2798 case ISCSI_PARAM_USERNAME:
2726 kfree(session->username); 2799 return iscsi_switch_str_param(&session->username, buf);
2727 session->username = kstrdup(buf, GFP_KERNEL);
2728 if (!session->username)
2729 return -ENOMEM;
2730 break;
2731 case ISCSI_PARAM_USERNAME_IN: 2800 case ISCSI_PARAM_USERNAME_IN:
2732 kfree(session->username_in); 2801 return iscsi_switch_str_param(&session->username_in, buf);
2733 session->username_in = kstrdup(buf, GFP_KERNEL);
2734 if (!session->username_in)
2735 return -ENOMEM;
2736 break;
2737 case ISCSI_PARAM_PASSWORD: 2802 case ISCSI_PARAM_PASSWORD:
2738 kfree(session->password); 2803 return iscsi_switch_str_param(&session->password, buf);
2739 session->password = kstrdup(buf, GFP_KERNEL);
2740 if (!session->password)
2741 return -ENOMEM;
2742 break;
2743 case ISCSI_PARAM_PASSWORD_IN: 2804 case ISCSI_PARAM_PASSWORD_IN:
2744 kfree(session->password_in); 2805 return iscsi_switch_str_param(&session->password_in, buf);
2745 session->password_in = kstrdup(buf, GFP_KERNEL);
2746 if (!session->password_in)
2747 return -ENOMEM;
2748 break;
2749 case ISCSI_PARAM_TARGET_NAME: 2806 case ISCSI_PARAM_TARGET_NAME:
2750 /* this should not change between logins */ 2807 return iscsi_switch_str_param(&session->targetname, buf);
2751 if (session->targetname)
2752 break;
2753
2754 session->targetname = kstrdup(buf, GFP_KERNEL);
2755 if (!session->targetname)
2756 return -ENOMEM;
2757 break;
2758 case ISCSI_PARAM_TPGT: 2808 case ISCSI_PARAM_TPGT:
2759 sscanf(buf, "%d", &session->tpgt); 2809 sscanf(buf, "%d", &session->tpgt);
2760 break; 2810 break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2762 sscanf(buf, "%d", &conn->persistent_port); 2812 sscanf(buf, "%d", &conn->persistent_port);
2763 break; 2813 break;
2764 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2814 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2765 /* 2815 return iscsi_switch_str_param(&conn->persistent_address, buf);
2766 * this is the address returned in discovery so it should
2767 * not change between logins.
2768 */
2769 if (conn->persistent_address)
2770 break;
2771
2772 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2773 if (!conn->persistent_address)
2774 return -ENOMEM;
2775 break;
2776 case ISCSI_PARAM_IFACE_NAME: 2816 case ISCSI_PARAM_IFACE_NAME:
2777 if (!session->ifacename) 2817 return iscsi_switch_str_param(&session->ifacename, buf);
2778 session->ifacename = kstrdup(buf, GFP_KERNEL);
2779 break;
2780 case ISCSI_PARAM_INITIATOR_NAME: 2818 case ISCSI_PARAM_INITIATOR_NAME:
2781 if (!session->initiatorname) 2819 return iscsi_switch_str_param(&session->initiatorname, buf);
2782 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2783 break;
2784 default: 2820 default:
2785 return -ENOSYS; 2821 return -ENOSYS;
2786 } 2822 }
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2851 len = sprintf(buf, "%s\n", session->ifacename); 2887 len = sprintf(buf, "%s\n", session->ifacename);
2852 break; 2888 break;
2853 case ISCSI_PARAM_INITIATOR_NAME: 2889 case ISCSI_PARAM_INITIATOR_NAME:
2854 if (!session->initiatorname) 2890 len = sprintf(buf, "%s\n", session->initiatorname);
2855 len = sprintf(buf, "%s\n", "unknown");
2856 else
2857 len = sprintf(buf, "%s\n", session->initiatorname);
2858 break; 2891 break;
2859 default: 2892 default:
2860 return -ENOSYS; 2893 return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2920 2953
2921 switch (param) { 2954 switch (param) {
2922 case ISCSI_HOST_PARAM_NETDEV_NAME: 2955 case ISCSI_HOST_PARAM_NETDEV_NAME:
2923 if (!ihost->netdev) 2956 len = sprintf(buf, "%s\n", ihost->netdev);
2924 len = sprintf(buf, "%s\n", "default");
2925 else
2926 len = sprintf(buf, "%s\n", ihost->netdev);
2927 break; 2957 break;
2928 case ISCSI_HOST_PARAM_HWADDRESS: 2958 case ISCSI_HOST_PARAM_HWADDRESS:
2929 if (!ihost->hwaddress) 2959 len = sprintf(buf, "%s\n", ihost->hwaddress);
2930 len = sprintf(buf, "%s\n", "default");
2931 else
2932 len = sprintf(buf, "%s\n", ihost->hwaddress);
2933 break; 2960 break;
2934 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2961 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2935 if (!ihost->initiatorname) 2962 len = sprintf(buf, "%s\n", ihost->initiatorname);
2936 len = sprintf(buf, "%s\n", "unknown");
2937 else
2938 len = sprintf(buf, "%s\n", ihost->initiatorname);
2939 break; 2963 break;
2940 case ISCSI_HOST_PARAM_IPADDRESS: 2964 case ISCSI_HOST_PARAM_IPADDRESS:
2941 if (!strlen(ihost->local_address)) 2965 len = sprintf(buf, "%s\n", ihost->local_address);
2942 len = sprintf(buf, "%s\n", "unknown");
2943 else
2944 len = sprintf(buf, "%s\n",
2945 ihost->local_address);
2946 break; 2966 break;
2947 default: 2967 default:
2948 return -ENOSYS; 2968 return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2959 2979
2960 switch (param) { 2980 switch (param) {
2961 case ISCSI_HOST_PARAM_NETDEV_NAME: 2981 case ISCSI_HOST_PARAM_NETDEV_NAME:
2962 if (!ihost->netdev) 2982 return iscsi_switch_str_param(&ihost->netdev, buf);
2963 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2964 break;
2965 case ISCSI_HOST_PARAM_HWADDRESS: 2983 case ISCSI_HOST_PARAM_HWADDRESS:
2966 if (!ihost->hwaddress) 2984 return iscsi_switch_str_param(&ihost->hwaddress, buf);
2967 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2968 break;
2969 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2985 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2970 if (!ihost->initiatorname) 2986 return iscsi_switch_str_param(&ihost->initiatorname, buf);
2971 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2972 break;
2973 default: 2987 default:
2974 return -ENOSYS; 2988 return -ENOSYS;
2975 } 2989 }
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f4836..2bc07090321d 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
440 struct iscsi_tcp_task *tcp_task = task->dd_data; 440 struct iscsi_tcp_task *tcp_task = task->dd_data;
441 struct iscsi_r2t_info *r2t; 441 struct iscsi_r2t_info *r2t;
442 442
443 /* nothing to do for mgmt or pending tasks */ 443 /* nothing to do for mgmt */
444 if (!task->sc || task->state == ISCSI_TASK_PENDING) 444 if (!task->sc)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
473 int datasn = be32_to_cpu(rhdr->datasn); 473 int datasn = be32_to_cpu(rhdr->datasn);
474 unsigned total_in_length = scsi_in(task->sc)->length; 474 unsigned total_in_length = scsi_in(task->sc)->length;
475 475
476 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); 476 /*
477 * lib iscsi will update this in the completion handling if there
478 * is status.
479 */
480 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
481 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
482
477 if (tcp_conn->in.datalen == 0) 483 if (tcp_conn->in.datalen == 0)
478 return 0; 484 return 0;
479 485
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
857 int rc = 0; 863 int rc = 0;
858 864
859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); 865 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
866 /*
867 * Update for each skb instead of pdu, because over slow networks a
868 * data_in's data could take a while to read in. We also want to
869 * account for r2ts.
870 */
871 conn->last_recv = jiffies;
860 872
861 if (unlikely(conn->suspend_rx)) { 873 if (unlikely(conn->suspend_rx)) {
862 ISCSI_DBG_TCP(conn, "Rx suspended!\n"); 874 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..540569849099 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
23 23
24struct lpfc_sli2_slim; 24struct lpfc_sli2_slim;
25 25
26#define LPFC_PCI_DEV_LP 0x1
27#define LPFC_PCI_DEV_OC 0x2
28
29#define LPFC_SLI_REV2 2
30#define LPFC_SLI_REV3 3
31#define LPFC_SLI_REV4 4
32
26#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ 33#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 34#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
28 requests */ 35 requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
98}; 105};
99 106
100struct hbq_dmabuf { 107struct hbq_dmabuf {
108 struct lpfc_dmabuf hbuf;
101 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
102 uint32_t size; 110 uint32_t size;
103 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe;
104}; 113};
105 114
106/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
134 } rev; 143 } rev;
135 struct { 144 struct {
136#ifdef __BIG_ENDIAN_BITFIELD 145#ifdef __BIG_ENDIAN_BITFIELD
137 uint32_t rsvd2 :24; /* Reserved */ 146 uint32_t rsvd3 :19; /* Reserved */
147 uint32_t cdss : 1; /* Configure Data Security SLI */
148 uint32_t rsvd2 : 3; /* Reserved */
149 uint32_t cbg : 1; /* Configure BlockGuard */
138 uint32_t cmv : 1; /* Configure Max VPIs */ 150 uint32_t cmv : 1; /* Configure Max VPIs */
139 uint32_t ccrp : 1; /* Config Command Ring Polling */ 151 uint32_t ccrp : 1; /* Config Command Ring Polling */
140 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 164 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
153 uint32_t ccrp : 1; /* Config Command Ring Polling */ 165 uint32_t ccrp : 1; /* Config Command Ring Polling */
154 uint32_t cmv : 1; /* Configure Max VPIs */ 166 uint32_t cmv : 1; /* Configure Max VPIs */
155 uint32_t rsvd2 :24; /* Reserved */ 167 uint32_t cbg : 1; /* Configure BlockGuard */
168 uint32_t rsvd2 : 3; /* Reserved */
169 uint32_t cdss : 1; /* Configure Data Security SLI */
170 uint32_t rsvd3 :19; /* Reserved */
156#endif 171#endif
157 } sli3Feat; 172 } sli3Feat;
158} lpfc_vpd_t; 173} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
264}; 279};
265 280
266struct lpfc_vport { 281struct lpfc_vport {
267 struct list_head listentry;
268 struct lpfc_hba *phba; 282 struct lpfc_hba *phba;
283 struct list_head listentry;
269 uint8_t port_type; 284 uint8_t port_type;
270#define LPFC_PHYSICAL_PORT 1 285#define LPFC_PHYSICAL_PORT 1
271#define LPFC_NPIV_PORT 2 286#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
273 enum discovery_state port_state; 288 enum discovery_state port_state;
274 289
275 uint16_t vpi; 290 uint16_t vpi;
291 uint16_t vfi;
292 uint8_t vfi_state;
293#define LPFC_VFI_REGISTERED 0x1
276 294
277 uint32_t fc_flag; /* FC flags */ 295 uint32_t fc_flag; /* FC flags */
278/* Several of these flags are HBA centric and should be moved to 296/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
385#endif 403#endif
386 uint8_t stat_data_enabled; 404 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked; 405 uint8_t stat_data_blocked;
406 struct list_head rcv_buffer_list;
407 uint32_t vport_flag;
408#define STATIC_VPORT 1
388}; 409};
389 410
390struct hbq_s { 411struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
420}; 441};
421 442
422struct lpfc_hba { 443struct lpfc_hba {
444 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf)
446 (struct lpfc_vport *, int);
447 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
448 (struct lpfc_hba *);
449 int (*lpfc_scsi_prep_dma_buf)
450 (struct lpfc_hba *, struct lpfc_scsi_buf *);
451 void (*lpfc_scsi_unprep_dma_buf)
452 (struct lpfc_hba *, struct lpfc_scsi_buf *);
453 void (*lpfc_release_scsi_buf)
454 (struct lpfc_hba *, struct lpfc_scsi_buf *);
455 void (*lpfc_rampdown_queue_depth)
456 (struct lpfc_hba *);
457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t,
467 struct lpfc_iocbq *, uint32_t);
468 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
469 struct lpfc_iocbq *);
470 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
471
472
473 IOCB_t * (*lpfc_get_iocb_from_iocbq)
474 (struct lpfc_iocbq *);
475 void (*lpfc_scsi_cmd_iocb_cmpl)
476 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
477
478 /* MBOX interface function jump table entries */
479 int (*lpfc_sli_issue_mbox)
480 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
481 /* Slow-path IOCB process function jump table entries */
482 void (*lpfc_sli_handle_slow_ring_event)
483 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
484 uint32_t mask);
485 /* INIT device interface function jump table entries */
486 int (*lpfc_sli_hbq_to_firmware)
487 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
488 int (*lpfc_sli_brdrestart)
489 (struct lpfc_hba *);
490 int (*lpfc_sli_brdready)
491 (struct lpfc_hba *, uint32_t);
492 void (*lpfc_handle_eratt)
493 (struct lpfc_hba *);
494 void (*lpfc_stop_port)
495 (struct lpfc_hba *);
496
497
498 /* SLI4 specific HBA data structure */
499 struct lpfc_sli4_hba sli4_hba;
500
423 struct lpfc_sli sli; 501 struct lpfc_sli sli;
424 uint32_t sli_rev; /* SLI2 or SLI3 */ 502 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
503 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
425 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 504 uint32_t sli3_options; /* Mask of enabled SLI3 options */
426#define LPFC_SLI3_HBQ_ENABLED 0x01 505#define LPFC_SLI3_HBQ_ENABLED 0x01
427#define LPFC_SLI3_NPIV_ENABLED 0x02 506#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
429#define LPFC_SLI3_CRP_ENABLED 0x08 508#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 509#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20 510#define LPFC_SLI3_BG_ENABLED 0x20
511#define LPFC_SLI3_DSS_ENABLED 0x40
432 uint32_t iocb_cmd_size; 512 uint32_t iocb_cmd_size;
433 uint32_t iocb_rsp_size; 513 uint32_t iocb_rsp_size;
434 514
@@ -442,8 +522,13 @@ struct lpfc_hba {
442 522
443 uint32_t hba_flag; /* hba generic flags */ 523 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 524#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 525#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ 526#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
527#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
528#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
529#define FCP_XRI_ABORT_EVENT 0x20
530#define ELS_XRI_ABORT_EVENT 0x40
531#define ASYNC_EVENT 0x80
447 struct lpfc_dmabuf slim2p; 532 struct lpfc_dmabuf slim2p;
448 533
449 MAILBOX_t *mbox; 534 MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
502 uint32_t cfg_poll; 587 uint32_t cfg_poll;
503 uint32_t cfg_poll_tmo; 588 uint32_t cfg_poll_tmo;
504 uint32_t cfg_use_msi; 589 uint32_t cfg_use_msi;
590 uint32_t cfg_fcp_imax;
591 uint32_t cfg_fcp_wq_count;
592 uint32_t cfg_fcp_eq_count;
505 uint32_t cfg_sg_seg_cnt; 593 uint32_t cfg_sg_seg_cnt;
506 uint32_t cfg_prot_sg_seg_cnt; 594 uint32_t cfg_prot_sg_seg_cnt;
507 uint32_t cfg_sg_dma_buf_size; 595 uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
511 uint32_t cfg_enable_hba_reset; 599 uint32_t cfg_enable_hba_reset;
512 uint32_t cfg_enable_hba_heartbeat; 600 uint32_t cfg_enable_hba_heartbeat;
513 uint32_t cfg_enable_bg; 601 uint32_t cfg_enable_bg;
602 uint32_t cfg_enable_fip;
603 uint32_t cfg_log_verbose;
514 604
515 lpfc_vpd_t vpd; /* vital product data */ 605 lpfc_vpd_t vpd; /* vital product data */
516 606
@@ -526,11 +616,12 @@ struct lpfc_hba {
526 unsigned long data_flags; 616 unsigned long data_flags;
527 617
528 uint32_t hbq_in_use; /* HBQs in use flag */ 618 uint32_t hbq_in_use; /* HBQs in use flag */
529 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 619 struct list_head rb_pend_list; /* Received buffers to be processed */
530 uint32_t hbq_count; /* Count of configured HBQs */ 620 uint32_t hbq_count; /* Count of configured HBQs */
531 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 621 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
532 622
533 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 623 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
624 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
534 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 625 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
535 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 626 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
536 PCI BAR0 */ 627 PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
593 /* pci_mem_pools */ 684 /* pci_mem_pools */
594 struct pci_pool *lpfc_scsi_dma_buf_pool; 685 struct pci_pool *lpfc_scsi_dma_buf_pool;
595 struct pci_pool *lpfc_mbuf_pool; 686 struct pci_pool *lpfc_mbuf_pool;
596 struct pci_pool *lpfc_hbq_pool; 687 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
688 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
597 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 689 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
598 690
599 mempool_t *mbox_mem_pool; 691 mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
609 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 701 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
610 uint16_t max_vpi; /* Maximum virtual nports */ 702 uint16_t max_vpi; /* Maximum virtual nports */
611#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 703#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
704 uint16_t max_vports; /*
705 * For IOV HBAs max_vpi can change
706 * after a reset. max_vports is max
707 * number of vports present. This can
708 * be greater than max_vpi.
709 */
710 uint16_t vpi_base;
711 uint16_t vfi_base;
612 unsigned long *vpi_bmask; /* vpi allocation table */ 712 unsigned long *vpi_bmask; /* vpi allocation table */
613 713
614 /* Data structure used by fabric iocb scheduler */ 714 /* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
667/* Maximum number of events that can be outstanding at any time*/ 767/* Maximum number of events that can be outstanding at any time*/
668#define LPFC_MAX_EVT_COUNT 512 768#define LPFC_MAX_EVT_COUNT 512
669 atomic_t fast_event_count; 769 atomic_t fast_event_count;
770 struct lpfc_fcf fcf;
771 uint8_t fc_map[3];
772 uint8_t valid_vlan;
773 uint16_t vlan_id;
774 struct list_head fcf_conn_rec_list;
670}; 775};
671 776
672static inline struct Scsi_Host * 777static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb125..d73e677201f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 37#include "lpfc_nl.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
505 return -ENOMEM; 507 return -ENOMEM;
506 508
507 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 509 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
508 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 510 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
509 pmboxq->mb.mbxOwner = OWN_HOST; 511 pmboxq->u.mb.mbxOwner = OWN_HOST;
510 512
511 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 513 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
512 514
513 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 515 if ((mbxstatus == MBX_SUCCESS) &&
516 (pmboxq->u.mb.mbxStatus == 0 ||
517 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
514 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 518 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
515 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 519 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
516 phba->cfg_link_speed); 520 phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
789 uint32_t *mrpi, uint32_t *arpi, 793 uint32_t *mrpi, uint32_t *arpi,
790 uint32_t *mvpi, uint32_t *avpi) 794 uint32_t *mvpi, uint32_t *avpi)
791{ 795{
792 struct lpfc_sli *psli = &phba->sli; 796 struct lpfc_sli *psli = &phba->sli;
797 struct lpfc_mbx_read_config *rd_config;
793 LPFC_MBOXQ_t *pmboxq; 798 LPFC_MBOXQ_t *pmboxq;
794 MAILBOX_t *pmb; 799 MAILBOX_t *pmb;
795 int rc = 0; 800 int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
800 */ 805 */
801 if (phba->link_state < LPFC_LINK_DOWN || 806 if (phba->link_state < LPFC_LINK_DOWN ||
802 !phba->mbox_mem_pool || 807 !phba->mbox_mem_pool ||
803 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 808 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
804 return 0; 809 return 0;
805 810
806 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 811 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
811 return 0; 816 return 0;
812 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 817 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
813 818
814 pmb = &pmboxq->mb; 819 pmb = &pmboxq->u.mb;
815 pmb->mbxCommand = MBX_READ_CONFIG; 820 pmb->mbxCommand = MBX_READ_CONFIG;
816 pmb->mbxOwner = OWN_HOST; 821 pmb->mbxOwner = OWN_HOST;
817 pmboxq->context1 = NULL; 822 pmboxq->context1 = NULL;
818 823
819 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 824 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
820 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 825 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
821 rc = MBX_NOT_FINISHED; 826 rc = MBX_NOT_FINISHED;
822 else 827 else
823 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 828 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
828 return 0; 833 return 0;
829 } 834 }
830 835
831 if (mrpi) 836 if (phba->sli_rev == LPFC_SLI_REV4) {
832 *mrpi = pmb->un.varRdConfig.max_rpi; 837 rd_config = &pmboxq->u.mqe.un.rd_config;
833 if (arpi) 838 if (mrpi)
834 *arpi = pmb->un.varRdConfig.avail_rpi; 839 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
835 if (mxri) 840 if (arpi)
836 *mxri = pmb->un.varRdConfig.max_xri; 841 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
837 if (axri) 842 phba->sli4_hba.max_cfg_param.rpi_used;
838 *axri = pmb->un.varRdConfig.avail_xri; 843 if (mxri)
839 if (mvpi) 844 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
840 *mvpi = pmb->un.varRdConfig.max_vpi; 845 if (axri)
841 if (avpi) 846 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
842 *avpi = pmb->un.varRdConfig.avail_vpi; 847 phba->sli4_hba.max_cfg_param.xri_used;
848 if (mvpi)
849 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
850 if (avpi)
851 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
852 phba->sli4_hba.max_cfg_param.vpi_used;
853 } else {
854 if (mrpi)
855 *mrpi = pmb->un.varRdConfig.max_rpi;
856 if (arpi)
857 *arpi = pmb->un.varRdConfig.avail_rpi;
858 if (mxri)
859 *mxri = pmb->un.varRdConfig.max_xri;
860 if (axri)
861 *axri = pmb->un.varRdConfig.avail_xri;
862 if (mvpi)
863 *mvpi = pmb->un.varRdConfig.max_vpi;
864 if (avpi)
865 *avpi = pmb->un.varRdConfig.avail_vpi;
866 }
843 867
844 mempool_free(pmboxq, phba->mbox_mem_pool); 868 mempool_free(pmboxq, phba->mbox_mem_pool);
845 return 1; 869 return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
2021# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 2045# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
2022# deluged with LOTS of information. 2046# deluged with LOTS of information.
2023# You can set a bit mask to record specific types of verbose messages: 2047# You can set a bit mask to record specific types of verbose messages:
2024# 2048# See lpfc_logmsh.h for definitions.
2025# LOG_ELS 0x1 ELS events
2026# LOG_DISCOVERY 0x2 Link discovery events
2027# LOG_MBOX 0x4 Mailbox events
2028# LOG_INIT 0x8 Initialization events
2029# LOG_LINK_EVENT 0x10 Link events
2030# LOG_FCP 0x40 FCP traffic history
2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockBuard events
2033# LOG_MISC 0x400 Miscellaneous events
2034# LOG_SLI 0x800 SLI events
2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
2036# LOG_LIBDFC 0x2000 LIBDFC events
2037# LOG_ALL_MSG 0xffff LOG all messages
2038*/ 2049*/
2039LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, 2050LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2040 "Verbose logging bit-mask"); 2051 "Verbose logging bit-mask");
2041 2052
2042/* 2053/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2277static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2267 lpfc_topology_show, lpfc_topology_store); 2278 lpfc_topology_show, lpfc_topology_store);
2268 2279
2280/**
2281 * lpfc_static_vport_show: Read callback function for
2282 * lpfc_static_vport sysfs file.
2283 * @dev: Pointer to class device object.
2284 * @attr: device attribute structure.
2285 * @buf: Data buffer.
2286 *
2287 * This function is the read call back function for
2288 * lpfc_static_vport sysfs file. The lpfc_static_vport
2289 * sysfs file report the mageability of the vport.
2290 **/
2291static ssize_t
2292lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 if (vport->vport_flag & STATIC_VPORT)
2298 sprintf(buf, "1\n");
2299 else
2300 sprintf(buf, "0\n");
2301
2302 return strlen(buf);
2303}
2304
2305/*
2306 * Sysfs attribute to control the statistical data collection.
2307 */
2308static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
2309 lpfc_static_vport_show, NULL);
2269 2310
2270/** 2311/**
2271 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 2312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2341 if (vports == NULL) 2382 if (vports == NULL)
2342 return -ENOMEM; 2383 return -ENOMEM;
2343 2384
2344 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2385 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2345 v_shost = lpfc_shost_from_vport(vports[i]); 2386 v_shost = lpfc_shost_from_vport(vports[i]);
2346 spin_lock_irq(v_shost->host_lock); 2387 spin_lock_irq(v_shost->host_lock);
2347 /* Block and reset data collection */ 2388 /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2356 phba->bucket_base = base; 2397 phba->bucket_base = base;
2357 phba->bucket_step = step; 2398 phba->bucket_step = step;
2358 2399
2359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2400 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2360 v_shost = lpfc_shost_from_vport(vports[i]); 2401 v_shost = lpfc_shost_from_vport(vports[i]);
2361 2402
2362 /* Unblock data collection */ 2403 /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2373 if (vports == NULL) 2414 if (vports == NULL)
2374 return -ENOMEM; 2415 return -ENOMEM;
2375 2416
2376 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2377 v_shost = lpfc_shost_from_vport(vports[i]); 2418 v_shost = lpfc_shost_from_vport(vports[i]);
2378 spin_lock_irq(shost->host_lock); 2419 spin_lock_irq(shost->host_lock);
2379 vports[i]->stat_data_blocked = 1; 2420 vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2844/* 2885/*
2845# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2886# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2846# support this feature 2887# support this feature
2847# 0 = MSI disabled 2888# 0 = MSI disabled (default)
2848# 1 = MSI enabled 2889# 1 = MSI enabled
2849# 2 = MSI-X enabled (default) 2890# 2 = MSI-X enabled
2850# Value range is [0,2]. Default value is 2. 2891# Value range is [0,2]. Default value is 0.
2851*/ 2892*/
2852LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 2893LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
2853 "MSI-X (2), if possible"); 2894 "MSI-X (2), if possible");
2854 2895
2855/* 2896/*
2897# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
2898#
2899# Value range is [636,651042]. Default value is 10000.
2900*/
2901LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
2902 "Set the maximum number of fast-path FCP interrupts per second");
2903
2904/*
2905# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
2906#
2907# Value range is [1,31]. Default value is 4.
2908*/
2909LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
2910 "Set the number of fast-path FCP work queues, if possible");
2911
2912/*
2913# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
2914#
2915# Value range is [1,7]. Default value is 1.
2916*/
2917LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
2918 "Set the number of fast-path FCP event queues, if possible");
2919
2920/*
2856# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 2921# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
2857# 0 = HBA resets disabled 2922# 0 = HBA resets disabled
2858# 1 = HBA resets enabled (default) 2923# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2876*/ 2941*/
2877LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 2942LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2878 2943
2944/*
2945# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2946# set, the driver will add an FCF record manually if the port has no
2947# FCF records available and start discovery.
2948# Value range is [0,1]. Default value is 1 (enabled)
2949*/
2950LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2951
2879 2952
2880/* 2953/*
2881# lpfc_prot_mask: i 2954# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2942 &dev_attr_lpfc_peer_port_login, 3015 &dev_attr_lpfc_peer_port_login,
2943 &dev_attr_lpfc_nodev_tmo, 3016 &dev_attr_lpfc_nodev_tmo,
2944 &dev_attr_lpfc_devloss_tmo, 3017 &dev_attr_lpfc_devloss_tmo,
3018 &dev_attr_lpfc_enable_fip,
2945 &dev_attr_lpfc_fcp_class, 3019 &dev_attr_lpfc_fcp_class,
2946 &dev_attr_lpfc_use_adisc, 3020 &dev_attr_lpfc_use_adisc,
2947 &dev_attr_lpfc_ack0, 3021 &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
2969 &dev_attr_lpfc_poll, 3043 &dev_attr_lpfc_poll,
2970 &dev_attr_lpfc_poll_tmo, 3044 &dev_attr_lpfc_poll_tmo,
2971 &dev_attr_lpfc_use_msi, 3045 &dev_attr_lpfc_use_msi,
3046 &dev_attr_lpfc_fcp_imax,
3047 &dev_attr_lpfc_fcp_wq_count,
3048 &dev_attr_lpfc_fcp_eq_count,
2972 &dev_attr_lpfc_enable_bg, 3049 &dev_attr_lpfc_enable_bg,
2973 &dev_attr_lpfc_soft_wwnn, 3050 &dev_attr_lpfc_soft_wwnn,
2974 &dev_attr_lpfc_soft_wwpn, 3051 &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
2991 &dev_attr_lpfc_lun_queue_depth, 3068 &dev_attr_lpfc_lun_queue_depth,
2992 &dev_attr_lpfc_nodev_tmo, 3069 &dev_attr_lpfc_nodev_tmo,
2993 &dev_attr_lpfc_devloss_tmo, 3070 &dev_attr_lpfc_devloss_tmo,
3071 &dev_attr_lpfc_enable_fip,
2994 &dev_attr_lpfc_hba_queue_depth, 3072 &dev_attr_lpfc_hba_queue_depth,
2995 &dev_attr_lpfc_peer_port_login, 3073 &dev_attr_lpfc_peer_port_login,
2996 &dev_attr_lpfc_restrict_login, 3074 &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3003 &dev_attr_lpfc_enable_da_id, 3081 &dev_attr_lpfc_enable_da_id,
3004 &dev_attr_lpfc_max_scsicmpl_time, 3082 &dev_attr_lpfc_max_scsicmpl_time,
3005 &dev_attr_lpfc_stat_data_ctrl, 3083 &dev_attr_lpfc_stat_data_ctrl,
3084 &dev_attr_lpfc_static_vport,
3006 NULL, 3085 NULL,
3007}; 3086};
3008 3087
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3199 } 3278 }
3200 } 3279 }
3201 3280
3202 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, 3281 memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
3203 buf, count); 3282 buf, count);
3204 3283
3205 phba->sysfs_mbox.offset = off + count; 3284 phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3320 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3242 struct lpfc_hba *phba = vport->phba; 3321 struct lpfc_hba *phba = vport->phba;
3243 int rc; 3322 int rc;
3323 MAILBOX_t *pmb;
3244 3324
3245 if (off > MAILBOX_CMD_SIZE) 3325 if (off > MAILBOX_CMD_SIZE)
3246 return -ERANGE; 3326 return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3265 if (off == 0 && 3345 if (off == 0 &&
3266 phba->sysfs_mbox.state == SMBOX_WRITING && 3346 phba->sysfs_mbox.state == SMBOX_WRITING &&
3267 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 3347 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
3268 3348 pmb = &phba->sysfs_mbox.mbox->u.mb;
3269 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 3349 switch (pmb->mbxCommand) {
3270 /* Offline only */ 3350 /* Offline only */
3271 case MBX_INIT_LINK: 3351 case MBX_INIT_LINK:
3272 case MBX_DOWN_LINK: 3352 case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3283 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3363 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3284 printk(KERN_WARNING "mbox_read:Command 0x%x " 3364 printk(KERN_WARNING "mbox_read:Command 0x%x "
3285 "is illegal in on-line state\n", 3365 "is illegal in on-line state\n",
3286 phba->sysfs_mbox.mbox->mb.mbxCommand); 3366 pmb->mbxCommand);
3287 sysfs_mbox_idle(phba); 3367 sysfs_mbox_idle(phba);
3288 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3289 return -EPERM; 3369 return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3319 case MBX_CONFIG_PORT: 3399 case MBX_CONFIG_PORT:
3320 case MBX_RUN_BIU_DIAG: 3400 case MBX_RUN_BIU_DIAG:
3321 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 3401 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
3322 phba->sysfs_mbox.mbox->mb.mbxCommand); 3402 pmb->mbxCommand);
3323 sysfs_mbox_idle(phba); 3403 sysfs_mbox_idle(phba);
3324 spin_unlock_irq(&phba->hbalock); 3404 spin_unlock_irq(&phba->hbalock);
3325 return -EPERM; 3405 return -EPERM;
3326 default: 3406 default:
3327 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 3407 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
3328 phba->sysfs_mbox.mbox->mb.mbxCommand); 3408 pmb->mbxCommand);
3329 sysfs_mbox_idle(phba); 3409 sysfs_mbox_idle(phba);
3330 spin_unlock_irq(&phba->hbalock); 3410 spin_unlock_irq(&phba->hbalock);
3331 return -EPERM; 3411 return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3335 * or RESTART mailbox commands until the HBA is restarted. 3415 * or RESTART mailbox commands until the HBA is restarted.
3336 */ 3416 */
3337 if (phba->pport->stopped && 3417 if (phba->pport->stopped &&
3338 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && 3418 pmb->mbxCommand != MBX_DUMP_MEMORY &&
3339 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && 3419 pmb->mbxCommand != MBX_RESTART &&
3340 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && 3420 pmb->mbxCommand != MBX_WRITE_VPARMS &&
3341 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) 3421 pmb->mbxCommand != MBX_WRITE_WWN)
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 3422 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
3343 "1259 mbox: Issued mailbox cmd " 3423 "1259 mbox: Issued mailbox cmd "
3344 "0x%x while in stopped state.\n", 3424 "0x%x while in stopped state.\n",
3345 phba->sysfs_mbox.mbox->mb.mbxCommand); 3425 pmb->mbxCommand);
3346 3426
3347 phba->sysfs_mbox.mbox->vport = vport; 3427 phba->sysfs_mbox.mbox->vport = vport;
3348 3428
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3356 } 3436 }
3357 3437
3358 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3438 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3359 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 3439 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
3360 3440
3361 spin_unlock_irq(&phba->hbalock); 3441 spin_unlock_irq(&phba->hbalock);
3362 rc = lpfc_sli_issue_mbox (phba, 3442 rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3368 spin_unlock_irq(&phba->hbalock); 3448 spin_unlock_irq(&phba->hbalock);
3369 rc = lpfc_sli_issue_mbox_wait (phba, 3449 rc = lpfc_sli_issue_mbox_wait (phba,
3370 phba->sysfs_mbox.mbox, 3450 phba->sysfs_mbox.mbox,
3371 lpfc_mbox_tmo_val(phba, 3451 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
3372 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
3373 spin_lock_irq(&phba->hbalock); 3452 spin_lock_irq(&phba->hbalock);
3374 } 3453 }
3375 3454
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3391 return -EAGAIN; 3470 return -EAGAIN;
3392 } 3471 }
3393 3472
3394 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 3473 memcpy(buf, (uint8_t *) &pmb + off, count);
3395 3474
3396 phba->sysfs_mbox.offset = off + count; 3475 phba->sysfs_mbox.offset = off + count;
3397 3476
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
3585 case LA_8GHZ_LINK: 3664 case LA_8GHZ_LINK:
3586 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 3665 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
3587 break; 3666 break;
3667 case LA_10GHZ_LINK:
3668 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
3669 break;
3588 default: 3670 default:
3589 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 3671 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
3590 break; 3672 break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3652 */ 3734 */
3653 if (phba->link_state < LPFC_LINK_DOWN || 3735 if (phba->link_state < LPFC_LINK_DOWN ||
3654 !phba->mbox_mem_pool || 3736 !phba->mbox_mem_pool ||
3655 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 3737 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
3656 return NULL; 3738 return NULL;
3657 3739
3658 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 3740 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
3663 return NULL; 3745 return NULL;
3664 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 3746 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
3665 3747
3666 pmb = &pmboxq->mb; 3748 pmb = &pmboxq->u.mb;
3667 pmb->mbxCommand = MBX_READ_STATUS; 3749 pmb->mbxCommand = MBX_READ_STATUS;
3668 pmb->mbxOwner = OWN_HOST; 3750 pmb->mbxOwner = OWN_HOST;
3669 pmboxq->context1 = NULL; 3751 pmboxq->context1 = NULL;
3670 pmboxq->vport = vport; 3752 pmboxq->vport = vport;
3671 3753
3672 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3754 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3673 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3755 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3674 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3756 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3675 else 3757 else
3676 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3758 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3695 pmboxq->vport = vport; 3777 pmboxq->vport = vport;
3696 3778
3697 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3779 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3698 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3780 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3699 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3700 else 3782 else
3701 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3769 return; 3851 return;
3770 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3852 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3771 3853
3772 pmb = &pmboxq->mb; 3854 pmb = &pmboxq->u.mb;
3773 pmb->mbxCommand = MBX_READ_STATUS; 3855 pmb->mbxCommand = MBX_READ_STATUS;
3774 pmb->mbxOwner = OWN_HOST; 3856 pmb->mbxOwner = OWN_HOST;
3775 pmb->un.varWords[0] = 0x1; /* reset request */ 3857 pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3777 pmboxq->vport = vport; 3859 pmboxq->vport = vport;
3778 3860
3779 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3861 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3780 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3862 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3863 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3782 else 3864 else
3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3865 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3795 pmboxq->vport = vport; 3877 pmboxq->vport = vport;
3796 3878
3797 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3879 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3798 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3880 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3799 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3881 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3800 else 3882 else
3801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3883 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3962 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4044 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3963} 4045}
3964 4046
4047/**
4048 * lpfc_hba_log_verbose_init - Set hba's log verbose level
4049 * @phba: Pointer to lpfc_hba struct.
4050 *
4051 * This function is called by the lpfc_get_cfgparam() routine to set the
4052 * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
4053 * log messsage according to the module's lpfc_log_verbose parameter setting
4054 * before hba port or vport created.
4055 **/
4056static void
4057lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
4058{
4059 phba->cfg_log_verbose = verbose;
4060}
4061
3965struct fc_function_template lpfc_transport_functions = { 4062struct fc_function_template lpfc_transport_functions = {
3966 /* fixed attributes the driver supports */ 4063 /* fixed attributes the driver supports */
3967 .show_host_node_name = 1, 4064 .show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4105 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4202 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4106 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4203 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4107 lpfc_use_msi_init(phba, lpfc_use_msi); 4204 lpfc_use_msi_init(phba, lpfc_use_msi);
4205 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4206 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4207 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4108 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4208 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4109 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4209 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4110 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4210 lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4113 phba->cfg_soft_wwpn = 0L; 4213 phba->cfg_soft_wwpn = 0L;
4114 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4214 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4115 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4215 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4116 /*
4117 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4118 * used to create the sg_dma_buf_pool must be dynamically calculated.
4119 * 2 segments are added since the IOCB needs a command and response bde.
4120 */
4121 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4122 sizeof(struct fcp_rsp) +
4123 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4124
4125 if (phba->cfg_enable_bg) {
4126 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4127 phba->cfg_sg_dma_buf_size +=
4128 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4129 }
4130
4131 /* Also reinitialize the host templates with new values. */
4132 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4133 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4134
4135 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4216 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4217 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4218 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4219
4136 return; 4220 return;
4137} 4221}
4138 4222
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f26190..d2a922997c0f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
28 30
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
35int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); 37int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
36void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 38void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
37void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 39void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
38int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 40int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
39 LPFC_MBOXQ_t *, uint32_t); 41 LPFC_MBOXQ_t *, uint32_t);
40void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 42void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
41void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 43void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
42void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
43void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
44void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 46void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
47void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
45 48
46struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 49struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
47void lpfc_cleanup_rpis(struct lpfc_vport *, int); 50void lpfc_cleanup_rpis(struct lpfc_vport *, int);
48int lpfc_linkdown(struct lpfc_hba *); 51int lpfc_linkdown(struct lpfc_hba *);
52void lpfc_linkdown_port(struct lpfc_vport *);
49void lpfc_port_link_failure(struct lpfc_vport *); 53void lpfc_port_link_failure(struct lpfc_vport *);
50void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 54void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
51 55
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
54void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 58void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 59void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 60void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
61void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
57void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 62void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 63void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 64struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
105int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 110int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
106int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); 111int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
107int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); 112int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
113int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
108int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 114int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
109int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 115int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
110int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, 116int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
149void lpfc_unblock_mgmt_io(struct lpfc_hba *); 155void lpfc_unblock_mgmt_io(struct lpfc_hba *);
150void lpfc_offline_prep(struct lpfc_hba *); 156void lpfc_offline_prep(struct lpfc_hba *);
151void lpfc_offline(struct lpfc_hba *); 157void lpfc_offline(struct lpfc_hba *);
158void lpfc_reset_hba(struct lpfc_hba *);
152 159
153int lpfc_sli_setup(struct lpfc_hba *); 160int lpfc_sli_setup(struct lpfc_hba *);
154int lpfc_sli_queue_setup(struct lpfc_hba *); 161int lpfc_sli_queue_setup(struct lpfc_hba *);
155 162
156void lpfc_handle_eratt(struct lpfc_hba *); 163void lpfc_handle_eratt(struct lpfc_hba *);
157void lpfc_handle_latt(struct lpfc_hba *); 164void lpfc_handle_latt(struct lpfc_hba *);
158irqreturn_t lpfc_intr_handler(int, void *); 165irqreturn_t lpfc_sli_intr_handler(int, void *);
159irqreturn_t lpfc_sp_intr_handler(int, void *); 166irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
160irqreturn_t lpfc_fp_intr_handler(int, void *); 167irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
168irqreturn_t lpfc_sli4_intr_handler(int, void *);
169irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
170irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
161 171
162void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 172void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
163void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 173void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
165void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 175void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
166void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 176void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
167LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 177LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
178void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
168void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 179void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
180int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
181int lpfc_mbox_dev_check(struct lpfc_hba *);
169int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 182int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
170 190
171void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
172 uint32_t , LPFC_MBOXQ_t *); 192 uint32_t , LPFC_MBOXQ_t *);
173struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); 193struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
174void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); 194void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
195struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
196void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
197void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
198 uint16_t);
199void lpfc_unregister_unused_fcf(struct lpfc_hba *);
175 200
176int lpfc_mem_alloc(struct lpfc_hba *); 201int lpfc_mem_alloc(struct lpfc_hba *, int align);
177void lpfc_mem_free(struct lpfc_hba *); 202void lpfc_mem_free(struct lpfc_hba *);
203void lpfc_mem_free_all(struct lpfc_hba *);
178void lpfc_stop_vport_timers(struct lpfc_vport *); 204void lpfc_stop_vport_timers(struct lpfc_vport *);
179 205
180void lpfc_poll_timeout(unsigned long ptr); 206void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 212uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
187void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 213void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
188 uint32_t); 214 uint32_t);
215void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
189 216
190void lpfc_reset_barrier(struct lpfc_hba * phba); 217void lpfc_reset_barrier(struct lpfc_hba * phba);
191int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 218int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
198int lpfc_sli_hba_down(struct lpfc_hba *); 225int lpfc_sli_hba_down(struct lpfc_hba *);
199int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 226int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
200int lpfc_sli_handle_mb_event(struct lpfc_hba *); 227int lpfc_sli_handle_mb_event(struct lpfc_hba *);
201int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 228void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
202int lpfc_sli_check_eratt(struct lpfc_hba *); 229int lpfc_sli_check_eratt(struct lpfc_hba *);
203int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 230void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
204 struct lpfc_sli_ring *, uint32_t); 231 struct lpfc_sli_ring *, uint32_t);
232int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
205void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 233void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
206int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
207 struct lpfc_iocbq *, uint32_t); 235 struct lpfc_iocbq *, uint32_t);
208void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
209void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
237 265
238int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 266int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
239 267
240int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, 268int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, struct lpfc_iocbq *, 269 struct lpfc_iocbq *, struct lpfc_iocbq *,
242 uint32_t); 270 uint32_t);
243void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, 271void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
254const char* lpfc_info(struct Scsi_Host *); 282const char* lpfc_info(struct Scsi_Host *);
255int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 283int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
256 284
285int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
286int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
287int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
288int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
289int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
290
257void lpfc_get_cfgparam(struct lpfc_hba *); 291void lpfc_get_cfgparam(struct lpfc_hba *);
258void lpfc_get_vport_cfgparam(struct lpfc_vport *); 292void lpfc_get_vport_cfgparam(struct lpfc_vport *);
259int lpfc_alloc_sysfs_attr(struct lpfc_vport *); 293int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
314 struct lpfc_iocbq *); 348 struct lpfc_iocbq *);
315struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); 349struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
316void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); 350void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
351void lpfc_create_static_vport(struct lpfc_hba *);
352void lpfc_stop_hba_timers(struct lpfc_hba *);
353void lpfc_stop_port(struct lpfc_hba *);
354void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
355int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
356void lpfc_start_fdiscs(struct lpfc_hba *phba);
317 357
318#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 358#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
319#define HBA_EVENT_RSCN 5 359#define HBA_EVENT_RSCN 5
320#define HBA_EVENT_LINK_UP 2 360#define HBA_EVENT_LINK_UP 2
321#define HBA_EVENT_LINK_DOWN 3 361#define HBA_EVENT_LINK_DOWN 3
362
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..1dbccfd3d022 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -32,8 +32,10 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34 34
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
267 uint32_t tmo, uint8_t retry) 269 uint32_t tmo, uint8_t retry)
268{ 270{
269 struct lpfc_hba *phba = vport->phba; 271 struct lpfc_hba *phba = vport->phba;
270 struct lpfc_sli *psli = &phba->sli;
271 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
272 IOCB_t *icmd; 272 IOCB_t *icmd;
273 struct lpfc_iocbq *geniocb; 273 struct lpfc_iocbq *geniocb;
274 int rc; 274 int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
332 geniocb->vport = vport; 332 geniocb->vport = vport;
333 geniocb->retry = retry; 333 geniocb->retry = retry;
334 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); 334 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
335 335
336 if (rc == IOCB_ERROR) { 336 if (rc == IOCB_ERROR) {
337 lpfc_sli_release_iocbq(phba, geniocb); 337 lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1578 case LA_8GHZ_LINK: 1578 case LA_8GHZ_LINK:
1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1580 break; 1580 break;
1581 case LA_10GHZ_LINK:
1582 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1583 break;
1581 default: 1584 default:
1582 ae->un.PortSpeed = 1585 ae->un.PortSpeed =
1583 HBA_PORTSPEED_UNKNOWN; 1586 HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1730 uint8_t *fwname; 1733 uint8_t *fwname;
1731 1734
1732 if (vp->rev.rBit) { 1735 if (vp->rev.rBit) {
1733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1736 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1734 rev = vp->rev.sli2FwRev; 1737 rev = vp->rev.sli2FwRev;
1735 else 1738 else
1736 rev = vp->rev.sli1FwRev; 1739 rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1756 } 1759 }
1757 b4 = (rev & 0x0000000f); 1760 b4 = (rev & 0x0000000f);
1758 1761
1759 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1762 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1760 fwname = vp->rev.sli2FwName; 1763 fwname = vp->rev.sli2FwName;
1761 else 1764 else
1762 fwname = vp->rev.sli1FwName; 1765 fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..2b02b1fb39a0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 282 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 283 struct hbq_dmabuf *hbq_buf;
282 284
285 if (phba->sli_rev != 3)
286 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 287 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 288 spin_lock_irq(&phba->hbalock);
285 289
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 493 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 494 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 495 }
492 word0 = readl(phba->HAregaddr); 496
493 word1 = readl(phba->CAregaddr); 497 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 498 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 499 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 500 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 501 word3 = readl(phba->HCregaddr);
502 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
503 "HC:%08x\n", word0, word1, word2, word3);
504 }
498 spin_unlock_irq(&phba->hbalock); 505 spin_unlock_irq(&phba->hbalock);
499 return len; 506 return len;
500} 507}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd108972072..1142070e9484 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..6bdeb14878a2 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 222 icmd->un.elsreq64.myID = vport->fc_myDID;
220 223
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 224 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 225 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 226 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 227 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 228 if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 308 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 309 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 310 **/
308static int 311int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 312lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 313{
311 struct lpfc_hba *phba = vport->phba; 314 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 348 err = 4;
346 goto fail; 349 goto fail;
347 } 350 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 351 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 352 if (rc) {
351 err = 5; 353 err = 5;
352 goto fail_free_mbox; 354 goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
386} 388}
387 389
388/** 390/**
391 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
392 * @vport: pointer to a host virtual N_Port data structure.
393 *
394 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
395 * the @vport. This mailbox command is necessary for FCoE only.
396 *
397 * Return code
398 * 0 - successfully issued REG_VFI for @vport
399 * A failure code otherwise.
400 **/
401static int
402lpfc_issue_reg_vfi(struct lpfc_vport *vport)
403{
404 struct lpfc_hba *phba = vport->phba;
405 LPFC_MBOXQ_t *mboxq;
406 struct lpfc_nodelist *ndlp;
407 struct serv_parm *sp;
408 struct lpfc_dmabuf *dmabuf;
409 int rc = 0;
410
411 sp = &phba->fc_fabparam;
412 ndlp = lpfc_findnode_did(vport, Fabric_DID);
413 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
414 rc = -ENODEV;
415 goto fail;
416 }
417
418 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
419 if (!dmabuf) {
420 rc = -ENOMEM;
421 goto fail;
422 }
423 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
424 if (!dmabuf->virt) {
425 rc = -ENOMEM;
426 goto fail_free_dmabuf;
427 }
428 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
429 if (!mboxq) {
430 rc = -ENOMEM;
431 goto fail_free_coherent;
432 }
433 vport->port_state = LPFC_FABRIC_CFG_LINK;
434 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
435 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
436 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
437 mboxq->vport = vport;
438 mboxq->context1 = dmabuf;
439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
440 if (rc == MBX_NOT_FINISHED) {
441 rc = -ENXIO;
442 goto fail_free_mbox;
443 }
444 return 0;
445
446fail_free_mbox:
447 mempool_free(mboxq, phba->mbox_mem_pool);
448fail_free_coherent:
449 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
450fail_free_dmabuf:
451 kfree(dmabuf);
452fail:
453 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
454 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
455 "0289 Issue Register VFI failed: Err %d\n", rc);
456 return rc;
457}
458
459/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 460 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 461 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 462 * @ndlp: pointer to a node-list data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 568 }
498 } 569 }
499 570
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 571 if (phba->sli_rev < LPFC_SLI_REV4) {
501 572 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 573 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 574 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 575 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 576 else
577 lpfc_issue_fabric_reglogin(vport);
578 } else {
579 ndlp->nlp_type |= NLP_FABRIC;
580 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
581 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
582 lpfc_start_fdiscs(phba);
583 lpfc_do_scr_ns_plogi(phba, vport);
584 } else
585 lpfc_issue_reg_vfi(vport);
506 } 586 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 587 return 0;
509} 588}
510
511/** 589/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 590 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 591 * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 893 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 894 sp->cmn.fcphHigh = FC_PH3;
817 895
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 896 if (phba->sli_rev == LPFC_SLI_REV4) {
897 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
898 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
899 /* FLOGI needs to be 3 for WQE FCFI */
900 /* Set the fcfi to the fcfi we registered with */
901 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
902 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 903 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 904 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 905 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 906 icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1013 if (!ndlp)
931 return 0; 1014 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1015 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1016 /* Set the node type */
1017 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1018 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1019 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1020 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1435 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1436 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1437 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1438 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1439 uint8_t *pcmd;
1356 uint16_t cmdsize; 1440 uint16_t cmdsize;
1357 int ret; 1441 int ret;
1358 1442
1359 psli = &phba->sli; 1443 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1444
1362 ndlp = lpfc_findnode_did(vport, did); 1445 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1446 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1474
1392 phba->fc_stat.elsXmitPLOGI++; 1475 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1476 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1477 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1478
1396 if (ret == IOCB_ERROR) { 1479 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1480 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1584 PRLI *npr;
1502 IOCB_t *icmd; 1585 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1586 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1587 uint8_t *pcmd;
1507 uint16_t cmdsize; 1588 uint16_t cmdsize;
1508 1589
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1590 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1591 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1592 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1628 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1629 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1630 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1631 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1632 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1633 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1634 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1635 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1687 * and continue discovery.
1609 */ 1688 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1690 !(vport->fc_flag & FC_RSCN_MODE) &&
1691 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1692 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1693 return;
1614 } 1694 }
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1868 ADISC *ap;
1789 IOCB_t *icmd; 1869 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1870 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1871 uint8_t *pcmd;
1794 uint16_t cmdsize; 1872 uint16_t cmdsize;
1795 1873
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1900 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1901 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1902 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1903 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1904 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1905 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1906 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1907 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2016 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2017 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2018 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2019 uint8_t *pcmd;
1943 uint16_t cmdsize; 2020 uint16_t cmdsize;
1944 int rc; 2021 int rc;
1945 2022
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2023 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2024 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2025 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2052 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2053 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2054 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2055 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2056
1983 if (rc == IOCB_ERROR) { 2057 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2058 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2132 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2133 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2134 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2135 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2136 uint8_t *pcmd;
2064 uint16_t cmdsize; 2137 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2138 struct lpfc_nodelist *ndlp;
2066 2139
2067 psli = &phba->sli; 2140 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2141 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2142
2071 ndlp = lpfc_findnode_did(vport, nportid); 2143 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2180
2109 phba->fc_stat.elsXmitSCR++; 2181 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2182 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2183 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2184 IOCB_ERROR) {
2112 /* The additional lpfc_nlp_put will cause the following 2185 /* The additional lpfc_nlp_put will cause the following
2113 * lpfc_els_free_iocb routine to trigger the rlease of 2186 * lpfc_els_free_iocb routine to trigger the rlease of
2114 * the node. 2187 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2225 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2226 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2227 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2228 struct lpfc_sli *psli;
2157 FARP *fp; 2229 FARP *fp;
2158 uint8_t *pcmd; 2230 uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2234 struct lpfc_nodelist *ndlp;
2163 2235
2164 psli = &phba->sli; 2236 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2237 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2238
2168 ndlp = lpfc_findnode_did(vport, nportid); 2239 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2290
2220 phba->fc_stat.elsXmitFARPR++; 2291 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2292 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2293 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2294 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2295 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2296 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2297 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3021 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3022 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3023
3024 /*
3025 * This routine is used to register and unregister in previous SLI
3026 * modes.
3027 */
3028 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3029 (phba->sli_rev == LPFC_SLI_REV4))
3030 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3031
2952 pmb->context1 = NULL; 3032 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3033 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3034 kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3041 */
2962 lpfc_nlp_not_used(ndlp); 3042 lpfc_nlp_not_used(ndlp);
2963 } 3043 }
3044
2964 return; 3045 return;
2965} 3046}
2966 3047
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3251 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3252 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3253 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3254 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3255 uint8_t *pcmd;
3176 uint16_t cmdsize; 3256 uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3258 ELS_PKT *els_pkt_ptr;
3179 3259
3180 psli = &phba->sli; 3260 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3261 oldcmd = &oldiocb->iocb;
3183 3262
3184 switch (flag) { 3263 switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3345 }
3267 3346
3268 phba->fc_stat.elsXmitACC++; 3347 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3348 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3349 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3350 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3351 return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3384 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3385 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3386 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3387 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3388 uint8_t *pcmd;
3311 uint16_t cmdsize; 3389 uint16_t cmdsize;
3312 int rc; 3390 int rc;
3313 3391
3314 psli = &phba->sli; 3392 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3393 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3394 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3395 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3422
3347 phba->fc_stat.elsXmitLSRJT++; 3423 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3425 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3426
3351 if (rc == IOCB_ERROR) { 3427 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3428 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3455 struct lpfc_nodelist *ndlp)
3380{ 3456{
3381 struct lpfc_hba *phba = vport->phba; 3457 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3458 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3459 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3460 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3496
3423 phba->fc_stat.elsXmitACC++; 3497 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3498 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3499 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3500 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3501 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3502 return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3533 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3534 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3535 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3536 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3537 uint8_t *pcmd;
3465 uint16_t cmdsize; 3538 uint16_t cmdsize;
3466 int rc; 3539 int rc;
3467 3540
3468 psli = &phba->sli; 3541 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3542
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3543 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3544 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3592 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3593 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3594
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3595 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3596 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3597 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3598 return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3634 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3635 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3636 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3637 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3638 uint8_t *pcmd;
3568 uint16_t cmdsize; 3639 uint16_t cmdsize;
3569 int rc; 3640 int rc;
3570 3641
3571 psli = &phba->sli; 3642 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3643 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3644 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3645 if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3695 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3696 * it could be freed */
3628 3697
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3699 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3700 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3701 return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3908 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3909 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3910 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3911 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3912 && (ns_did.un.b.area == rscn_did.un.b.area)
3913 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3914 goto return_did_out;
3844 break; 3915 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3916 case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4371 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4372 phba->cfg_topology,
4302 phba->cfg_link_speed); 4373 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4374 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4375 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4376 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4511static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4512lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4513{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4514 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4515 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4516 RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4520 uint16_t xri, status;
4452 uint32_t cmdsize; 4521 uint32_t cmdsize;
4453 4522
4454 mb = &pmb->mb; 4523 mb = &pmb->u.mb;
4455 4524
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4525 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4526 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4576 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4577 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4578 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4579 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4580 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4581 return;
4513} 4582}
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4685 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4686 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4687 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4688 uint8_t *pcmd;
4622 4689
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4690 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4721 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4722 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4723 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4724 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4725 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4726 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4727 return 1;
4660 } 4728 }
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4951 } else {
4884 /* FAN verified - skip FLOGI */ 4952 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4953 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4954 if (phba->sli_rev < LPFC_SLI_REV4)
4955 lpfc_issue_fabric_reglogin(vport);
4956 else
4957 lpfc_issue_reg_vfi(vport);
4887 } 4958 }
4888 } 4959 }
4889 return 0; 4960 return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5637
5567dropit: 5638dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5639 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5640 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5641 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5642 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5643 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5644 phba->fc_stat.elsRcvDrop++;
5575} 5645}
5576 5646
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5716 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5717 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5718 vport = phba->pport;
5649 else { 5719 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5720 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5721 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5722 }
5654 /* If there are no BDEs associated 5723 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5724 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5850 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5852 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5853 MAILBOX_t *mb = &pmb->u.mb;
5785 5854
5786 spin_lock_irq(shost->host_lock); 5855 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5856 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5887
5819 } else { 5888 } else {
5820 if (vport == phba->pport) 5889 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5890 if (phba->sli_rev < LPFC_SLI_REV4)
5891 lpfc_issue_fabric_reglogin(vport);
5892 else
5893 lpfc_issue_reg_vfi(vport);
5822 else 5894 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5895 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5896 }
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5922
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5924 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5925 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5926 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5927 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5928 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6211{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6213 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6214 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6215 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6216 uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6240 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6241 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6242 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6243 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6244 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6245 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6246 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6247 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6296 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6297 unsigned long iflags;
6226 int ret; 6298 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6299 IOCB_t *cmd;
6229 6300
6230repeat: 6301repeat:
@@ -6248,7 +6319,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6319 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6320 iocb->vport->port_state, 0, 0);
6250 6321
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6322 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6323
6253 if (ret == IOCB_ERROR) { 6324 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6325 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6465lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6466{
6396 unsigned long iflags; 6467 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6468 int ready;
6399 int ret; 6469 int ret;
6400 6470
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6488 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6489 iocb->vport->port_state, 0, 0);
6420 6490
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6491 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6492
6423 if (ret == IOCB_ERROR) { 6493 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6494 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6595 IOERR_SLI_ABORTED);
6526} 6596}
6597
6598/**
6599 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6600 * @phba: pointer to lpfc hba data structure.
6601 * @axri: pointer to the els xri abort wcqe structure.
6602 *
6603 * This routine is invoked by the worker thread to process a SLI4 slow-path
6604 * ELS aborted xri.
6605 **/
6606void
6607lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6608 struct sli4_wcqe_xri_aborted *axri)
6609{
6610 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6611 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6612 unsigned long iflag = 0;
6613
6614 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6615 list_for_each_entry_safe(sglq_entry, sglq_next,
6616 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6617 if (sglq_entry->sli4_xritag == xri) {
6618 list_del(&sglq_entry->list);
6619 spin_unlock_irqrestore(
6620 &phba->sli4_hba.abts_sgl_list_lock,
6621 iflag);
6622 spin_lock_irqsave(&phba->hbalock, iflag);
6623
6624 list_add_tail(&sglq_entry->list,
6625 &phba->sli4_hba.lpfc_sgl_list);
6626 spin_unlock_irqrestore(&phba->hbalock, iflag);
6627 return;
6628 }
6629 }
6630 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6631}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..35c41ae75be2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
959} 989}
960 990
/**
 * lpfc_mbx_cmpl_reg_fcfi - Completion handler for the REG_FCFI mailbox cmd.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed mailbox object.
 *
 * On mailbox failure the error is logged and the mailbox is freed.
 * On success the newly assigned FCF index is recorded in phba->fcf.fcfi,
 * the FCF_REGISTERED flag is set under hbalock, and - unless a FLOGI is
 * already outstanding on the physical port - discovery is kicked off with
 * an initial FLOGI.  The mailbox is always returned to the mailbox
 * memory pool before returning.
 **/
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	unsigned long flags;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2017 REG_FCFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	/* Only start discovery if no FLOGI is already in progress. */
	if (vport->port_state != LPFC_FLOGI) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_initial_flogi(vport);
	}

	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
1022
1023/**
1024 * lpfc_fab_name_match - Check if the fcf fabric name match.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
1028 * This routine compare the fcf record's fabric name with provided
1029 * fabric name. If the fabric name are identical this function
1030 * returns 1 else return 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
1056/**
1057 * lpfc_mac_addr_match - Check if the fcf mac address match.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
1061 * This routine compare the fcf record's mac address with HBA's
1062 * FCF mac address. If the mac addresses are identical this function
1063 * returns 1 else return 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a REG_FCFI mailbox command to register the selected
 * FCF with the HBA.  If the FCF is not yet marked FCF_AVAILABLE nothing is
 * done.  If it is already FCF_REGISTERED the routine skips the mailbox and
 * goes straight to discovery (initial FLOGI), unless a FLOGI is already in
 * progress on the physical port.  The mailbox completes asynchronously in
 * lpfc_mbx_cmpl_reg_fcfi(); on submit failure the mailbox is freed here.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Allocation may sleep; must be outside the spinlock. */
	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
		GFP_KERNEL);
	if (!fcf_mbxq)
		return;

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	/* On submit failure the completion handler never runs; free here. */
	if (rc == MBX_NOT_FINISHED)
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);

	return;
}
1174
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The VLAN id to be used with this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery else returns zero.
 * If this FCF record can be used for SAN discovery, boot_flag will indicate
 * if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in *vlan_id on return.  If no VLAN tagging is needed with the FCF,
 * *vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;

	/* FIP disabled: accept any FCF with the FCF's own address mode. */
	if (!phba->cfg_enable_fip) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);
		*vlan_id = 0xFFFF;
		return 1;
	}

	/* First matching valid connection entry decides the outcome. */
	list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		/* Fabric name filter, when the entry specifies one. */
		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
				new_fcf_record))
			continue;

		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, skip this FCF.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, skip this FCF.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/* Default to the address modes the FCF itself provides. */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode unconditionally.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use that
		 * mode only if the FCF supports it.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;
		/*
		 * If user did not specify any addressing mode, use FPMA if
		 * possible else use SPMA.
		 */
		else if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}
1317
1318/**
1319 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1320 * @phba: pointer to lpfc hba data structure.
1321 * @mboxq: pointer to mailbox object.
1322 *
1323 * This function iterate through all the fcf records available in
1324 * HBA and choose the optimal FCF record for discovery. After finding
1325 * the FCF for discovery it register the FCF record and kick start
1326 * discovery.
1327 * If FCF_IN_USE flag is set in currently used FCF, the routine try to
1328 * use a FCF record which match fabric name and mac address of the
1329 * currently used FCF record.
1330 * If the driver support only one FCF, it will try to use the FCF record
1331 * used by BOOT_BIOS.
1332 */
1333void
1334lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1335{
1336 void *virt_addr;
1337 dma_addr_t phys_addr;
1338 uint8_t *bytep;
1339 struct lpfc_mbx_sge sge;
1340 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1341 uint32_t shdr_status, shdr_add_status;
1342 union lpfc_sli4_cfg_shdr *shdr;
1343 struct fcf_record *new_fcf_record;
1344 int rc;
1345 uint32_t boot_flag, addr_mode;
1346 uint32_t next_fcf_index;
1347 unsigned long flags;
1348 uint16_t vlan_id;
1349
1350 /* Get the first SGE entry from the non-embedded DMA memory. This
1351 * routine only uses a single SGE.
1352 */
1353 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1354 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1355 if (unlikely(!mboxq->sge_array)) {
1356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1357 "2524 Failed to get the non-embedded SGE "
1358 "virtual address\n");
1359 goto out;
1360 }
1361 virt_addr = mboxq->sge_array->addr[0];
1362
1363 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1364 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1365 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1366 &shdr->response);
1367 /*
1368 * The FCF Record was read and there is no reason for the driver
1369 * to maintain the FCF record data or memory. Instead, just need
1370 * to book keeping the FCFIs can be used.
1371 */
1372 if (shdr_status || shdr_add_status) {
1373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1374 "2521 READ_FCF_RECORD mailbox failed "
1375 "with status x%x add_status x%x, mbx\n",
1376 shdr_status, shdr_add_status);
1377 goto out;
1378 }
1379 /* Interpreting the returned information of FCF records */
1380 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1381 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1382 sizeof(struct lpfc_mbx_read_fcf_tbl));
1383 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1384
1385 new_fcf_record = (struct fcf_record *)(virt_addr +
1386 sizeof(struct lpfc_mbx_read_fcf_tbl));
1387 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1388 sizeof(struct fcf_record));
1389 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1390
1391 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1392 &boot_flag, &addr_mode,
1393 &vlan_id);
1394 /*
1395 * If the fcf record does not match with connect list entries
1396 * read the next entry.
1397 */
1398 if (!rc)
1399 goto read_next_fcf;
1400 /*
1401 * If this is not the first FCF discovery of the HBA, use last
1402 * FCF record for the discovery.
1403 */
1404 spin_lock_irqsave(&phba->hbalock, flags);
1405 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1406 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1407 new_fcf_record) &&
1408 lpfc_mac_addr_match(phba, new_fcf_record)) {
1409 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1410 spin_unlock_irqrestore(&phba->hbalock, flags);
1411 goto out;
1412 }
1413 spin_unlock_irqrestore(&phba->hbalock, flags);
1414 goto read_next_fcf;
1415 }
1416 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1417 /*
1418 * If the current FCF record does not have boot flag
1419 * set and new fcf record has boot flag set, use the
1420 * new fcf record.
1421 */
1422 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1423 /* Use this FCF record */
1424 lpfc_copy_fcf_record(phba, new_fcf_record);
1425 phba->fcf.addr_mode = addr_mode;
1426 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1427 if (vlan_id != 0xFFFF) {
1428 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1429 phba->fcf.vlan_id = vlan_id;
1430 }
1431 spin_unlock_irqrestore(&phba->hbalock, flags);
1432 goto read_next_fcf;
1433 }
1434 /*
1435 * If the current FCF record has boot flag set and the
1436 * new FCF record does not have boot flag, read the next
1437 * FCF record.
1438 */
1439 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1440 spin_unlock_irqrestore(&phba->hbalock, flags);
1441 goto read_next_fcf;
1442 }
1443 /*
1444 * If there is a record with lower priority value for
1445 * the current FCF, use that record.
1446 */
1447 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1448 && (new_fcf_record->fip_priority <
1449 phba->fcf.priority)) {
1450 /* Use this FCF record */
1451 lpfc_copy_fcf_record(phba, new_fcf_record);
1452 phba->fcf.addr_mode = addr_mode;
1453 if (vlan_id != 0xFFFF) {
1454 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1455 phba->fcf.vlan_id = vlan_id;
1456 }
1457 spin_unlock_irqrestore(&phba->hbalock, flags);
1458 goto read_next_fcf;
1459 }
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * This is the first available FCF record, use this
1465 * record.
1466 */
1467 lpfc_copy_fcf_record(phba, new_fcf_record);
1468 phba->fcf.addr_mode = addr_mode;
1469 if (boot_flag)
1470 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1471 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1472 if (vlan_id != 0xFFFF) {
1473 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1474 phba->fcf.vlan_id = vlan_id;
1475 }
1476 spin_unlock_irqrestore(&phba->hbalock, flags);
1477 goto read_next_fcf;
1478
1479read_next_fcf:
1480 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1481 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1482 lpfc_register_fcf(phba);
1483 else
1484 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1485 return;
1486
1487out:
1488 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1489 lpfc_register_fcf(phba);
1490
1491 return;
1492}
1493
1494/**
1495 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This function loops through the list of vports on the @phba and issues an
1499 * FDISC if possible.
1500 */
1501void
1502lpfc_start_fdiscs(struct lpfc_hba *phba)
1503{
1504 struct lpfc_vport **vports;
1505 int i;
1506
1507 vports = lpfc_create_vport_work_array(phba);
1508 if (vports != NULL) {
1509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1510 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1511 continue;
1512 /* There are no vpi for this vport */
1513 if (vports[i]->vpi > phba->max_vpi) {
1514 lpfc_vport_set_state(vports[i],
1515 FC_VPORT_FAILED);
1516 continue;
1517 }
1518 if (phba->fc_topology == TOPOLOGY_LOOP) {
1519 lpfc_vport_set_state(vports[i],
1520 FC_VPORT_LINKDOWN);
1521 continue;
1522 }
1523 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1524 lpfc_initial_fdisc(vports[i]);
1525 else {
1526 lpfc_vport_set_state(vports[i],
1527 FC_VPORT_NO_FABRIC_SUPP);
1528 lpfc_printf_vlog(vports[i], KERN_ERR,
1529 LOG_ELS,
1530 "0259 No NPIV "
1531 "Fabric support\n");
1532 }
1533 }
1534 }
1535 lpfc_destroy_vport_work_array(phba, vports);
1536}
1537
/**
 * lpfc_mbx_cmpl_reg_vfi - Completion handler for the REG_VFI mailbox cmd.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed mailbox object.
 *
 * On failure the vport is either driven through loop-map discovery (loop
 * topology) or set to FC_VPORT_FAILED.  On success the vport is marked
 * LPFC_VFI_REGISTERED and, when in fabric config-link state, FDISCs and
 * the SCR/nameserver PLOGI are started.  The mailbox and the DMA buffer
 * passed in context1 are freed on every path.
 */
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* Mark the vport has registered with its VFI */
	vport->vfi_state |= LPFC_VFI_REGISTERED;

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
1573
1574static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1575lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1576{
964 MAILBOX_t *mb = &pmb->mb; 1577 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1578 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1579 struct lpfc_vport *vport = pmb->vport;
967 1580
@@ -1012,13 +1625,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1625lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1626{
1014 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1628 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1629 int i;
1017 struct lpfc_dmabuf *mp; 1630 struct lpfc_dmabuf *mp;
1018 int rc; 1631 int rc;
1632 struct fcf_record *fcf_record;
1019 1633
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1634 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1635
1023 spin_lock_irq(&phba->hbalock); 1636 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1637 switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1647 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1648 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1649 break;
1650 case LA_10GHZ_LINK:
1651 phba->fc_linkspeed = LA_10GHZ_LINK;
1652 break;
1037 default: 1653 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1654 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1655 break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1731 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1732 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1733 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1734 goto out;
1121 } 1735 }
1122 } 1736 }
1123 1737
1124 if (cfglink_mbox) { 1738 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1739 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 if (!cfglink_mbox)
1741 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1742 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1743 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1744 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1745 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1746 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1747 if (rc == MBX_NOT_FINISHED) {
1131 return; 1748 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1749 goto out;
1750 }
1751 } else {
1752 /*
1753 * Add the driver's default FCF record at FCF index 0 now. This
1754 * is phase 1 implementation that support FCF index 0 and driver
1755 * defaults.
1756 */
1757 if (phba->cfg_enable_fip == 0) {
1758 fcf_record = kzalloc(sizeof(struct fcf_record),
1759 GFP_KERNEL);
1760 if (unlikely(!fcf_record)) {
1761 lpfc_printf_log(phba, KERN_ERR,
1762 LOG_MBOX | LOG_SLI,
1763 "2554 Could not allocate memmory for "
1764 "fcf record\n");
1765 rc = -ENODEV;
1766 goto out;
1767 }
1768
1769 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1770 LPFC_FCOE_FCF_DEF_INDEX);
1771 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1772 if (unlikely(rc)) {
1773 lpfc_printf_log(phba, KERN_ERR,
1774 LOG_MBOX | LOG_SLI,
1775 "2013 Could not manually add FCF "
1776 "record 0, status %d\n", rc);
1777 rc = -ENODEV;
1778 kfree(fcf_record);
1779 goto out;
1780 }
1781 kfree(fcf_record);
1782 }
1783 /*
1784 * The driver is expected to do FIP/FCF. Call the port
1785 * and get the FCF Table.
1786 */
1787 rc = lpfc_sli4_read_fcf_record(phba,
1788 LPFC_FCOE_FCF_GET_FIRST);
1789 if (rc)
1790 goto out;
1133 } 1791 }
1792
1793 return;
1134out: 1794out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1795 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1796 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1807 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1808 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1809 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1810 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1811 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1812 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1813 writel(control, phba->HCregaddr);
1814 readl(phba->HCregaddr); /* flush */
1815 }
1154 spin_unlock_irq(&phba->hbalock); 1816 spin_unlock_irq(&phba->hbalock);
1155} 1817}
1156 1818
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1821{
1160 lpfc_linkdown(phba); 1822 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1823 lpfc_enable_la(phba);
1824 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1825 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1826}
1164 1827
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1838 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1840 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1841 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1842 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1843
1181 /* Unblock ELS traffic */ 1844 /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1853 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1854 }
1192 1855
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1856 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1857
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1858 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1859
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 1991static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1992lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 1993{
1331 MAILBOX_t *mb = &pmb->mb; 1994 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 1995 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 1997
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2044{
1382 struct lpfc_vport *vport = pmb->vport; 2045 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2047 MAILBOX_t *mb = &pmb->u.mb;
1385 2048
1386 switch (mb->mbxStatus) { 2049 switch (mb->mbxStatus) {
1387 case 0x0011: 2050 case 0x0011:
@@ -1416,6 +2079,128 @@ out:
1416 return; 2079 return;
1417} 2080}
1418 2081
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues DUMP mailbox commands for config region 22 to read
 * the list of static vports to be created, then creates vports via
 * fc_vport_create() based on the information returned from the HBA.
 * The region is read in chunks until the whole static_vport_info buffer
 * is filled or the HBA returns a short/empty chunk.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0542 lpfc_create_static_vport failed to"
			" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0543 lpfc_create_static_vport failed to"
			" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	/* Read the config region chunk by chunk into vport_info. */
	vport_buff = (uint8_t *) vport_info;
	do {
		lpfc_dump_static_vport(phba, pmb, offset);
		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		/* Clamp the returned word count to the remaining buffer. */
		if (mb->un.varDmp.word_cnt >
			sizeof(struct static_vport_info) - offset)
			mb->un.varDmp.word_cnt =
				sizeof(struct static_vport_info) - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
			vport_buff + offset,
			mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;

	} while (mb->un.varDmp.word_cnt &&
		offset < sizeof(struct static_vport_info));


	/* Validate the signature and revision of the returned region. */
	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		/* An all-zero WWPN/WWNN marks an unused slot. */
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport \n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	/*
	 * If this is a timed out command, setting NULL to context2 tells the
	 * SLI layer not to use this buffer.
	 */
	spin_lock_irq(&phba->hbalock);
	pmb->context2 = NULL;
	spin_unlock_irq(&phba->hbalock);
	kfree(vport_info);
	/* On MBX_TIMEOUT the SLI layer still owns the mailbox; don't free. */
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
2203
1419/* 2204/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2205 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2206 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2211lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2212{
1428 struct lpfc_vport *vport = pmb->vport; 2213 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2214 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2215 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2216 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2217
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2218 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2219 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2220 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2221 if (mb->mbxStatus) {
2222 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2223 "0258 Register Fabric login error: 0x%x\n",
2224 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2226 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2227 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2240 }
1455 2241
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2243 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2244 * to the ndlp are done.
1462 */ 2245 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2248 }
1466 2249
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2250 ndlp->nlp_rpi = mb->un.varWords[0];
2251 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2252 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2254
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2255 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2256 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2257 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2258 }
1498 2259
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2277void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2278lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2279{
1519 MAILBOX_t *mb = &pmb->mb; 2280 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2281 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2282 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2283 struct lpfc_vport *vport = pmb->vport;
1523 2284
1524 if (mb->mbxStatus) { 2285 if (mb->mbxStatus) {
1525out: 2286out:
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2288 "0260 Register NameServer error: 0x%x\n",
2289 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2290 /* decrement the node reference count held for this
1527 * callback function. 2291 * callback function.
1528 */ 2292 */
@@ -1546,15 +2310,13 @@ out:
1546 return; 2310 return;
1547 } 2311 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2313 return;
1553 } 2314 }
1554 2315
1555 pmb->context1 = NULL; 2316 pmb->context1 = NULL;
1556 2317
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2318 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2320 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2321 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2322
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2817 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2818 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2819 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2820 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2821 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2822 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2823 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2864 */
2103 psli = &phba->sli; 2865 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2866 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2867 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2868 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2869 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2870 pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2912 LPFC_MBOXQ_t *mbox;
2151 int rc; 2913 int rc;
2152 2914
2153 if (ndlp->nlp_rpi) { 2915 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2917 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2918 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2924 }
2163 lpfc_no_rpi(phba, ndlp); 2925 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2926 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2165 return 1; 2928 return 1;
2166 } 2929 }
2167 return 0; 2930 return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3015
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3016 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3017 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3018 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3019 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3020 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3021 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3024
2262 spin_lock_irq(&phba->hbalock); 3025 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3026 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3027 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3028 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3029 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3030 if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3072 int rc;
2310 3073
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3074 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3075 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3076 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3077 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3078 * allocated by the firmware.
2315 */ 3079 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3080 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3081 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3082 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3083 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3084 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3085 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3317 * clear_la then don't send it.
2554 */ 3318 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3319 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3320 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3321 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3322 return;
2558 3323
2559 /* Link up discovery */ 3324 /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3347
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3348 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3349 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3350 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3351 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3352 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3353 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3407 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3408 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3409 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3410 !(vport->fc_flag & FC_RSCN_MODE) &&
3411 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3412 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3413 return;
2648 } 3414 }
@@ -2919,11 +3685,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3685 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3686 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3687 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3688 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3690 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3691 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3692 lpfc_issue_clear_la(phba, vport);
3693 vport->port_state = LPFC_VPORT_READY;
3694 }
2927 } 3695 }
2928 3696
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3697 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3707 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3708 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3709 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3710 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3711 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3712 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3713 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3727 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3728 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3729 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3730 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3732 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3733 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3734 lpfc_issue_clear_la(phba, vport);
3735 vport->port_state = LPFC_VPORT_READY;
3736 }
2967 } 3737 }
2968 break; 3738 break;
2969 3739
@@ -3036,7 +3806,7 @@ restart_disc:
3036void 3806void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3807lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3808{
3039 MAILBOX_t *mb = &pmb->mb; 3809 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3810 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3812 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3814 pmb->context1 = NULL;
3045 3815
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3816 ndlp->nlp_rpi = mb->un.varWords[0];
3817 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3818 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3819 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3820
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4068 return 1;
3298 return 0; 4069 return 0;
3299} 4070}
4071
4072/**
4073 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4074 * @phba: Pointer to hba context object.
4075 *
4076 * This function iterates through all FC nodes associated
4077 * with all vports to check if there is any node with
4078 * fc_rports associated with it. If there is an fc_rport
4079 * associated with the node, then the node is either in
4080 * discovered state or its devloss_timer is pending.
4081 */
4082static int
4083lpfc_fcf_inuse(struct lpfc_hba *phba)
4084{
4085 struct lpfc_vport **vports;
4086 int i, ret = 0;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089
4090 vports = lpfc_create_vport_work_array(phba);
4091
4092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4093 shost = lpfc_shost_from_vport(vports[i]);
4094 spin_lock_irq(shost->host_lock);
4095 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4096 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4097 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4098 ret = 1;
4099 spin_unlock_irq(shost->host_lock);
4100 goto out;
4101 }
4102 }
4103 spin_unlock_irq(shost->host_lock);
4104 }
4105out:
4106 lpfc_destroy_vport_work_array(phba, vports);
4107 return ret;
4108}
4109
4110/**
4111 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4112 * @phba: Pointer to hba context object.
4113 * @mboxq: Pointer to mailbox object.
4114 *
4115 * This function frees memory associated with the mailbox command.
4116 */
4117static void
4118lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4119{
4120 struct lpfc_vport *vport = mboxq->vport;
4121
4122 if (mboxq->u.mb.mbxStatus) {
4123 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4124 "2555 UNREG_VFI mbxStatus error x%x "
4125 "HBA state x%x\n",
4126 mboxq->u.mb.mbxStatus, vport->port_state);
4127 }
4128 mempool_free(mboxq, phba->mbox_mem_pool);
4129 return;
4130}
4131
4132/**
4133 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4134 * @phba: Pointer to hba context object.
4135 * @mboxq: Pointer to mailbox object.
4136 *
4137 * This function frees memory associated with the mailbox command.
4138 */
4139static void
4140lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4141{
4142 struct lpfc_vport *vport = mboxq->vport;
4143
4144 if (mboxq->u.mb.mbxStatus) {
4145 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4146 "2550 UNREG_FCFI mbxStatus error x%x "
4147 "HBA state x%x\n",
4148 mboxq->u.mb.mbxStatus, vport->port_state);
4149 }
4150 mempool_free(mboxq, phba->mbox_mem_pool);
4151 return;
4152}
4153
4154/**
4155 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4156 * @phba: Pointer to hba context object.
4157 *
4158 * This function checks if there are any connected remote ports for the FCF and,
4159 * if all the devices are disconnected, this function unregisters the FCFI.
4160 * This function also tries to use another FCF for discovery.
4161 */
4162void
4163lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4164{
4165 LPFC_MBOXQ_t *mbox;
4166 int rc;
4167 struct lpfc_vport **vports;
4168 int i;
4169
4170 spin_lock_irq(&phba->hbalock);
4171 /*
4172 * If HBA is not running in FIP mode or
4173 * If HBA does not support FCoE or
4174 * If FCF is not registered.
4175 * do nothing.
4176 */
4177 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4178 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4179 (phba->cfg_enable_fip == 0)) {
4180 spin_unlock_irq(&phba->hbalock);
4181 return;
4182 }
4183 spin_unlock_irq(&phba->hbalock);
4184
4185 if (lpfc_fcf_inuse(phba))
4186 return;
4187
4188
4189 /* Unregister VPIs */
4190 vports = lpfc_create_vport_work_array(phba);
4191 if (vports &&
4192 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4193 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4194 lpfc_mbx_unreg_vpi(vports[i]);
4195 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4197 }
4198 lpfc_destroy_vport_work_array(phba, vports);
4199
4200 /* Unregister VFI */
4201 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4202 if (!mbox) {
4203 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4204 "2556 UNREG_VFI mbox allocation failed"
4205 "HBA state x%x\n",
4206 phba->pport->port_state);
4207 return;
4208 }
4209
4210 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4211 mbox->vport = phba->pport;
4212 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4213
4214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4215 if (rc == MBX_NOT_FINISHED) {
4216 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4217 "2557 UNREG_VFI issue mbox failed rc x%x "
4218 "HBA state x%x\n",
4219 rc, phba->pport->port_state);
4220 mempool_free(mbox, phba->mbox_mem_pool);
4221 return;
4222 }
4223
4224 /* Unregister FCF */
4225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4226 if (!mbox) {
4227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4228 "2551 UNREG_FCFI mbox allocation failed"
4229 "HBA state x%x\n",
4230 phba->pport->port_state);
4231 return;
4232 }
4233
4234 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4235 mbox->vport = phba->pport;
4236 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4238
4239 if (rc == MBX_NOT_FINISHED) {
4240 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4241 "2552 UNREG_FCFI issue mbox failed rc x%x "
4242 "HBA state x%x\n",
4243 rc, phba->pport->port_state);
4244 mempool_free(mbox, phba->mbox_mem_pool);
4245 return;
4246 }
4247
4248 spin_lock_irq(&phba->hbalock);
4249 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4250 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4251 FCF_VALID_VLAN);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 /*
4255 * If driver is not unloading, check if there is any other
4256 * FCF record that can be used for discovery.
4257 */
4258 if ((phba->pport->load_flag & FC_UNLOADING) ||
4259 (phba->link_state < LPFC_LINK_UP))
4260 return;
4261
4262 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4263
4264 if (rc)
4265 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4266 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4267 " record HBA state x%x\n",
4268 phba->pport->port_state);
4269}
4270
4271/**
4272 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4273 * @phba: Pointer to hba context object.
4274 * @buff: Buffer containing the FCF connection table as in the config
4275 * region.
4276 * This function creates the driver data structure for the FCF connection
4277 * record table read from config region 23.
4278 */
4279static void
4280lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4281 uint8_t *buff)
4282{
4283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4284 struct lpfc_fcf_conn_hdr *conn_hdr;
4285 struct lpfc_fcf_conn_rec *conn_rec;
4286 uint32_t record_count;
4287 int i;
4288
4289 /* Free the current connect table */
4290 list_for_each_entry_safe(conn_entry, next_conn_entry,
4291 &phba->fcf_conn_rec_list, list)
4292 kfree(conn_entry);
4293
4294 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4295 record_count = conn_hdr->length * sizeof(uint32_t)/
4296 sizeof(struct lpfc_fcf_conn_rec);
4297
4298 conn_rec = (struct lpfc_fcf_conn_rec *)
4299 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4300
4301 for (i = 0; i < record_count; i++) {
4302 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4303 continue;
4304 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4305 GFP_KERNEL);
4306 if (!conn_entry) {
4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4308 "2566 Failed to allocate connection"
4309 " table entry\n");
4310 return;
4311 }
4312
4313 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4314 sizeof(struct lpfc_fcf_conn_rec));
4315 conn_entry->conn_rec.vlan_tag =
4316 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4317 conn_entry->conn_rec.flags =
4318 le16_to_cpu(conn_entry->conn_rec.flags);
4319 list_add_tail(&conn_entry->list,
4320 &phba->fcf_conn_rec_list);
4321 }
4322}
4323
4324/**
4325 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4326 * @phba: Pointer to hba context object.
4327 * @buff: Buffer containing the FCoE parameter data structure.
4328 *
4329 * This function updates the driver data structure with config
4330 * parameters read from config region 23.
4331 */
4332static void
4333lpfc_read_fcoe_param(struct lpfc_hba *phba,
4334 uint8_t *buff)
4335{
4336 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4337 struct lpfc_fcoe_params *fcoe_param;
4338
4339 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4340 buff;
4341 fcoe_param = (struct lpfc_fcoe_params *)
4342 buff + sizeof(struct lpfc_fip_param_hdr);
4343
4344 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4345 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4346 return;
4347
4348 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4349 FIPP_MODE_ON)
4350 phba->cfg_enable_fip = 1;
4351
4352 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4353 FIPP_MODE_OFF)
4354 phba->cfg_enable_fip = 0;
4355
4356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4357 phba->valid_vlan = 1;
4358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4359 0xFFF;
4360 }
4361
4362 phba->fc_map[0] = fcoe_param->fc_map[0];
4363 phba->fc_map[1] = fcoe_param->fc_map[1];
4364 phba->fc_map[2] = fcoe_param->fc_map[2];
4365 return;
4366}
4367
4368/**
4369 * lpfc_get_rec_conf23 - Get a record type in config region data.
4370 * @buff: Buffer containing config region 23 data.
4371 * @size: Size of the data buffer.
4372 * @rec_type: Record type to be searched.
4373 *
4374 * This function searches config region data to find the beginning
4375 * of the record specified by record_type. If the record is found, this
4376 * function returns a pointer to the record; otherwise it returns NULL.
4377 */
4378static uint8_t *
4379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4380{
4381 uint32_t offset = 0, rec_length;
4382
4383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4384 (size < sizeof(uint32_t)))
4385 return NULL;
4386
4387 rec_length = buff[offset + 1];
4388
4389 /*
4390 * One TLV record has one word header and number of data words
4391 * specified in the rec_length field of the record header.
4392 */
4393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4394 <= size) {
4395 if (buff[offset] == rec_type)
4396 return &buff[offset];
4397
4398 if (buff[offset] == LPFC_REGION23_LAST_REC)
4399 return NULL;
4400
4401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4402 rec_length = buff[offset + 1];
4403 }
4404 return NULL;
4405}
4406
4407/**
4408 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4409 * @phba: Pointer to lpfc_hba data structure.
4410 * @buff: Buffer containing config region 23 data.
4411 * @size: Size of the data buffer.
4412 *
4413 * This function parses the FCoE config parameters in config region 23 and
4414 * populates the driver data structure with the parameters.
4415 */
4416void
4417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4418 uint8_t *buff,
4419 uint32_t size)
4420{
4421 uint32_t offset = 0, rec_length;
4422 uint8_t *rec_ptr;
4423
4424 /*
4425 * If data size is less than 2 words signature and version cannot be
4426 * verified.
4427 */
4428 if (size < 2*sizeof(uint32_t))
4429 return;
4430
4431 /* Check the region signature first */
4432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2567 Config region 23 has bad signature\n");
4435 return;
4436 }
4437
4438 offset += 4;
4439
4440 /* Check the data structure version */
4441 if (buff[offset] != LPFC_REGION23_VERSION) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "2568 Config region 23 has bad version\n");
4444 return;
4445 }
4446 offset += 4;
4447
4448 rec_length = buff[offset + 1];
4449
4450 /* Read FCoE param record */
4451 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4452 size - offset, FCOE_PARAM_TYPE);
4453 if (rec_ptr)
4454 lpfc_read_fcoe_param(phba, rec_ptr);
4455
4456 /* Read FCF connection table */
4457 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4458 size - offset, FCOE_CONN_TBL_TYPE);
4459 if (rec_ptr)
4460 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4461
4462}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b8..02aa016b93e9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,9 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1155 1187
1156#define JEDEC_ID_ADDRESS 0x0080001c 1188#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1189#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1374#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1375#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1376#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1377
1348#define MBX_WRITE_WWN 0x98 1378#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1379#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1380#define MBX_LOAD_EXP_ROM 0x9C
1351 1381#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1382#define MBX_SLI4_REQ_FTRS 0x9D
1383#define MBX_MAX_CMDS 0x9E
1384#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1385#define MBX_SLI2_CMD_MASK 0x80
1386#define MBX_REG_VFI 0x9F
1387#define MBX_REG_FCFI 0xA0
1388#define MBX_UNREG_VFI 0xA1
1389#define MBX_UNREG_FCFI 0xA2
1390#define MBX_INIT_VFI 0xA3
1391#define MBX_INIT_VPI 0xA4
1354 1392
1355/* IOCB Commands */ 1393/* IOCB Commands */
1356 1394
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1478#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1479#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1480
1481/* Unhandled Data Security SLI Commands */
1482#define DSSCMD_IWRITE64_CR 0xD8
1483#define DSSCMD_IWRITE64_CX 0xD9
1484#define DSSCMD_IREAD64_CR 0xDA
1485#define DSSCMD_IREAD64_CX 0xDB
1486#define DSSCMD_INVALIDATE_DEK 0xDC
1487#define DSSCMD_SET_KEK 0xDD
1488#define DSSCMD_GET_KEK_ID 0xDE
1489#define DSSCMD_GEN_XFER 0xDF
1490
1443#define CMD_MAX_IOCB_CMD 0xE6 1491#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1492#define CMD_IOCB_MASK 0xff
1445 1493
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1514#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1515#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1516#define MBXERR_ERROR 16
1517#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1518#define MBX_NOT_FINISHED 255
1470 1519
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1520#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
1504#endif 1553#endif
1505}; 1554};
1506 1555
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1556typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1557#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1558 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
2287 uint32_t rsvd3; 2310 uint32_t rsvd3;
2288 uint32_t rsvd4; 2311 uint32_t rsvd4;
2289 uint32_t rsvd5; 2312 uint32_t rsvd5;
2290 uint16_t rsvd6; 2313 uint16_t vfi;
2291 uint16_t vpi; 2314 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2315#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2316 uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
2297 uint32_t rsvd4; 2320 uint32_t rsvd4;
2298 uint32_t rsvd5; 2321 uint32_t rsvd5;
2299 uint16_t vpi; 2322 uint16_t vpi;
2300 uint16_t rsvd6; 2323 uint16_t vfi;
2301#endif 2324#endif
2302} REG_VPI_VAR; 2325} REG_VPI_VAR;
2303 2326
@@ -2457,7 +2480,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2480 uint32_t entry_index:16;
2458#endif 2481#endif
2459 2482
2460 uint32_t rsvd1; 2483 uint32_t sli4_length;
2461 uint32_t word_cnt; 2484 uint32_t word_cnt;
2462 uint32_t resp_offset; 2485 uint32_t resp_offset;
2463} DUMP_VAR; 2486} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2493#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2494#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2495
2496#define DMP_REGION_VPORT 0x16 /* VPort info region */
2497#define DMP_VPORT_REGION_SIZE 0x200
2498#define DMP_MBOX_OFFSET_WORD 0x5
2499
2500#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2501#define DMP_FCOEPARAM_RGN_SIZE 0x400
2502
2473#define WAKE_UP_PARMS_REGION_ID 4 2503#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2504#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2505
2506struct vport_rec {
2507 uint8_t wwpn[8];
2508 uint8_t wwnn[8];
2509};
2510
2511#define VPORT_INFO_SIG 0x32324752
2512#define VPORT_INFO_REV_MASK 0xff
2513#define VPORT_INFO_REV 0x1
2514#define MAX_STATIC_VPORT_COUNT 16
2515struct static_vport_info {
2516 uint32_t signature;
2517 uint32_t rev;
2518 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2519 uint32_t resvd[66];
2520};
2521
2476/* Option rom version structure */ 2522/* Option rom version structure */
2477struct prog_id { 2523struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2524#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
2697#endif 2743#endif
2698 2744
2699#ifdef __BIG_ENDIAN_BITFIELD 2745#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2746 uint32_t rsvd1 : 19; /* Reserved */
2747 uint32_t cdss : 1; /* Configure Data Security SLI */
2748 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2749 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2750 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2751 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2765 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2766 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2767 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2768 uint32_t rsvd2 : 3; /* Reserved */
2769 uint32_t cdss : 1; /* Configure Data Security SLI */
2770 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2771#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2772#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2773 uint32_t rsvd3 : 19; /* Reserved */
2774 uint32_t gdss : 1; /* Configure Data Security SLI */
2775 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2776 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2777 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2778 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2792 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2793 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2794 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2795 uint32_t rsvd4 : 3; /* Reserved */
2796 uint32_t gdss : 1; /* Configure Data Security SLI */
2797 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2798#endif
2745 2799
2746#ifdef __BIG_ENDIAN_BITFIELD 2800#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
2753 2807
2754#ifdef __BIG_ENDIAN_BITFIELD 2808#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2809 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2756 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2810 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2757#else /* __LITTLE_ENDIAN */ 2811#else /* __LITTLE_ENDIAN */
2758 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2813 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2814#endif
2761 2815
2762 uint32_t rsvd4; /* Reserved */ 2816 uint32_t rsvd6; /* Reserved */
2763 2817
2764#ifdef __BIG_ENDIAN_BITFIELD 2818#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2819 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2820 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2821#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2822 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2823 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2824#endif
2771 2825
2772} CONFIG_PORT_VAR; 2826} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3720#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3721#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3722#define SETVAR_MLORST 0x103007
3723
3724#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000000..39c34b3ad29d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * EG. For a bit field that is in the 7th bit of the "field4" field of a
24 * structure and is 2 bits in size the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * EG. To get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK 0x0000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
371struct lpfc_wqe_generic{
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
/* Subsystem Definitions */
/* Values placed in the sub-header "subsystem" field (bits 15:8 of word6). */
#define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE	0xC

/* Device Specific Definitions */

/* The HOST ENDIAN defines are in Big Endian format. */
#define HOST_ENDIAN_LOW_WORD0   0xFF3412FF
#define HOST_ENDIAN_HIGH_WORD1	0xFF7856FF

/* Common Opcodes */
/* Opcodes issued under LPFC_MBOX_SUBSYSTEM_COMMON. */
#define LPFC_MBOX_OPCODE_CQ_CREATE		0x0C
#define LPFC_MBOX_OPCODE_EQ_CREATE		0x0D
#define LPFC_MBOX_OPCODE_MQ_CREATE		0x15
#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES	0x20
#define LPFC_MBOX_OPCODE_NOP			0x21
#define LPFC_MBOX_OPCODE_MQ_DESTROY		0x35
#define LPFC_MBOX_OPCODE_CQ_DESTROY		0x36
#define LPFC_MBOX_OPCODE_EQ_DESTROY		0x37
#define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D

/* FCoE Opcodes */
/* Opcodes issued under LPFC_MBOX_SUBSYSTEM_FCOE. */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE		0x01
#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY	0x02
#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES	0x03
#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES	0x04
#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE		0x05
#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY	0x06
#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE	0x08
#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF		0x09
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF	0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE	0x0B
752/* Mailbox command structures */
/* Mailbox command structures */
/*
 * Event Queue context passed in EQ_CREATE.  The *_SHIFT/*_MASK/*_WORD
 * triplets locate each bit-field inside the named dword.
 */
struct eq_context {
	uint32_t word0;
#define lpfc_eq_context_size_SHIFT	31	/* EQE size select (bit 31) */
#define lpfc_eq_context_size_MASK	0x00000001
#define lpfc_eq_context_size_WORD	word0
#define LPFC_EQE_SIZE_4			0x0
#define LPFC_EQE_SIZE_16		0x1
#define lpfc_eq_context_valid_SHIFT	29
#define lpfc_eq_context_valid_MASK	0x00000001
#define lpfc_eq_context_valid_WORD	word0
	uint32_t word1;
#define lpfc_eq_context_count_SHIFT	26	/* encoded queue depth (bits 27:26) */
#define lpfc_eq_context_count_MASK	0x00000003
#define lpfc_eq_context_count_WORD	word1
#define LPFC_EQ_CNT_256		0x0
#define LPFC_EQ_CNT_512		0x1
#define LPFC_EQ_CNT_1024	0x2
#define LPFC_EQ_CNT_2048	0x3
#define LPFC_EQ_CNT_4096	0x4
	uint32_t word2;
#define lpfc_eq_context_delay_multi_SHIFT	13	/* interrupt delay multiplier */
#define lpfc_eq_context_delay_multi_MASK	0x000003FF
#define lpfc_eq_context_delay_multi_WORD	word2
	uint32_t reserved3;
};

/* One pair of SGL page physical addresses (lo/hi split dwords). */
struct sgl_page_pairs {
	uint32_t sgl_pg0_addr_lo;
	uint32_t sgl_pg0_addr_hi;
	uint32_t sgl_pg1_addr_lo;
	uint32_t sgl_pg1_addr_hi;
};

/* POST_SGL_PAGES payload: XRI range plus its SGL page-pair list. */
struct lpfc_mbx_post_sgl_pages {
	struct mbox_header header;
	uint32_t word0;
#define lpfc_post_sgl_pages_xri_SHIFT	0	/* starting XRI */
#define lpfc_post_sgl_pages_xri_MASK	0x0000FFFF
#define lpfc_post_sgl_pages_xri_WORD	word0
#define lpfc_post_sgl_pages_xricnt_SHIFT	16	/* number of XRIs */
#define lpfc_post_sgl_pages_xricnt_MASK	0x0000FFFF
#define lpfc_post_sgl_pages_xricnt_WORD	word0
	/* NOTE(review): [1] looks like a variable-length trailing array
	 * sized by xricnt -- confirm against the command builder. */
	struct sgl_page_pairs sgl_pg_pairs[1];
};

/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
/* Single-pair, unembedded variant of POST_SGL_PAGES. */
struct lpfc_mbx_post_uembed_sgl_page1 {
	union lpfc_sli4_cfg_shdr cfg_shdr;
	uint32_t word0;
	struct sgl_page_pairs sgl_pg_pairs;
};

/* Scatter/gather element for non-embedded mailbox payloads. */
struct lpfc_mbx_sge {
	uint32_t pa_lo;		/* physical address, low dword */
	uint32_t pa_hi;		/* physical address, high dword */
	uint32_t length;
};

/* Non-embedded command: header plus up to 19 payload-page SGEs. */
struct lpfc_mbx_nembed_cmd {
	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
#define LPFC_SLI4_MBX_SGE_MAX_PAGES	19
	struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
};

/* Host virtual addresses mirroring the physical SGE pages above. */
struct lpfc_mbx_nembed_sge_virt {
	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
};
820
/* EQ_CREATE: request carries the context and page list; response
 * returns the queue id assigned by the port. */
struct lpfc_mbx_eq_create {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_eq_create_num_pages_SHIFT	0
#define lpfc_mbx_eq_create_num_pages_MASK	0x0000FFFF
#define lpfc_mbx_eq_create_num_pages_WORD	word0
			struct eq_context context;
			struct dma_address page[LPFC_MAX_EQ_PAGE];
		} request;
		struct {
			uint32_t word0;
#define lpfc_mbx_eq_create_q_id_SHIFT	0
#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_eq_create_q_id_WORD	word0
		} response;
	} u;
};

/* EQ_DESTROY: request names the queue id; response word0 is unused
 * beyond the shared header status. */
struct lpfc_mbx_eq_destroy {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_eq_destroy_q_id_SHIFT	0
#define lpfc_mbx_eq_destroy_q_id_MASK	0x0000FFFF
#define lpfc_mbx_eq_destroy_q_id_WORD	word0
		} request;
		struct {
			uint32_t word0;
		} response;
	} u;
};

/* NOP command: two opaque context dwords echoed by the port. */
struct lpfc_mbx_nop {
	struct mbox_header header;
	uint32_t context[2];
};
860
/* Completion Queue context passed in CQ_CREATE. */
struct cq_context {
	uint32_t word0;
#define lpfc_cq_context_event_SHIFT	31
#define lpfc_cq_context_event_MASK	0x00000001
#define lpfc_cq_context_event_WORD	word0
#define lpfc_cq_context_valid_SHIFT	29
#define lpfc_cq_context_valid_MASK	0x00000001
#define lpfc_cq_context_valid_WORD	word0
#define lpfc_cq_context_count_SHIFT	27	/* encoded queue depth (bits 28:27) */
#define lpfc_cq_context_count_MASK	0x00000003
#define lpfc_cq_context_count_WORD	word0
#define LPFC_CQ_CNT_256		0x0
#define LPFC_CQ_CNT_512		0x1
#define LPFC_CQ_CNT_1024	0x2
	uint32_t word1;
#define lpfc_cq_eq_id_SHIFT	22	/* parent EQ this CQ reports to */
#define lpfc_cq_eq_id_MASK	0x000000FF
#define lpfc_cq_eq_id_WORD	word1
	uint32_t reserved0;
	uint32_t reserved1;
};

/* CQ_CREATE: request carries context and page list; response returns
 * the assigned queue id. */
struct lpfc_mbx_cq_create {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_cq_create_num_pages_SHIFT	0
#define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
#define lpfc_mbx_cq_create_num_pages_WORD	word0
			struct cq_context context;
			struct dma_address page[LPFC_MAX_CQ_PAGE];
		} request;
		struct {
			uint32_t word0;
#define lpfc_mbx_cq_create_q_id_SHIFT	0
#define lpfc_mbx_cq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_cq_create_q_id_WORD	word0
		} response;
	} u;
};

/* CQ_DESTROY: request names the queue id to tear down. */
struct lpfc_mbx_cq_destroy {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_cq_destroy_q_id_SHIFT	0
#define lpfc_mbx_cq_destroy_q_id_MASK	0x0000FFFF
#define lpfc_mbx_cq_destroy_q_id_WORD	word0
		} request;
		struct {
			uint32_t word0;
		} response;
	} u;
};
917
/* Work Queue context: all dwords reserved at this interface level. */
struct wq_context {
	uint32_t reserved0;
	uint32_t reserved1;
	uint32_t reserved2;
	uint32_t reserved3;
};

/* WQ_CREATE: request binds the WQ to a CQ and supplies its pages;
 * response returns the assigned queue id. */
struct lpfc_mbx_wq_create {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT	0
#define lpfc_mbx_wq_create_num_pages_MASK	0x0000FFFF
#define lpfc_mbx_wq_create_num_pages_WORD	word0
#define lpfc_mbx_wq_create_cq_id_SHIFT		16
#define lpfc_mbx_wq_create_cq_id_MASK		0x0000FFFF
#define lpfc_mbx_wq_create_cq_id_WORD		word0
			struct dma_address page[LPFC_MAX_WQ_PAGE];
		} request;
		struct {
			uint32_t word0;
#define lpfc_mbx_wq_create_q_id_SHIFT	0
#define lpfc_mbx_wq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_wq_create_q_id_WORD	word0
		} response;
	} u;
};

/* WQ_DESTROY: request names the queue id to tear down. */
struct lpfc_mbx_wq_destroy {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_wq_destroy_q_id_SHIFT	0
#define lpfc_mbx_wq_destroy_q_id_MASK	0x0000FFFF
#define lpfc_mbx_wq_destroy_q_id_WORD	word0
		} request;
		struct {
			uint32_t word0;
		} response;
	} u;
};
961
/* Receive buffer sizes for the header and data receive queues. */
#define LPFC_HDR_BUF_SIZE 128
#define LPFC_DATA_BUF_SIZE 4096
/* Receive Queue context passed in RQ_CREATE. */
struct rq_context {
	uint32_t word0;
#define lpfc_rq_context_rq_size_SHIFT	16	/* log2 encoding of ring depth */
#define lpfc_rq_context_rq_size_MASK	0x0000000F
#define lpfc_rq_context_rq_size_WORD	word0
#define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
#define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
#define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
#define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
	uint32_t reserved1;
	uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT	16	/* CQ receiving RQ completions */
#define lpfc_rq_context_cq_id_MASK	0x000003FF
#define lpfc_rq_context_cq_id_WORD	word2
#define lpfc_rq_context_buf_size_SHIFT	0	/* posted buffer size in bytes */
#define lpfc_rq_context_buf_size_MASK	0x0000FFFF
#define lpfc_rq_context_buf_size_WORD	word2
	uint32_t reserved3;
};

/* RQ_CREATE: request carries context and page list; response returns
 * the assigned queue id. */
struct lpfc_mbx_rq_create {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_rq_create_num_pages_SHIFT	0
#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
#define lpfc_mbx_rq_create_num_pages_WORD	word0
			struct rq_context context;
			/* NOTE(review): page list sized by LPFC_MAX_WQ_PAGE,
			 * not an RQ-specific limit -- confirm intentional. */
			struct dma_address page[LPFC_MAX_WQ_PAGE];
		} request;
		struct {
			uint32_t word0;
#define lpfc_mbx_rq_create_q_id_SHIFT	0
#define lpfc_mbx_rq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_rq_create_q_id_WORD	word0
		} response;
	} u;
};

/* RQ_DESTROY: request names the queue id to tear down. */
struct lpfc_mbx_rq_destroy {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_rq_destroy_q_id_SHIFT	0
#define lpfc_mbx_rq_destroy_q_id_MASK	0x0000FFFF
#define lpfc_mbx_rq_destroy_q_id_WORD	word0
		} request;
		struct {
			uint32_t word0;
		} response;
	} u;
};
1018
/* Mailbox Queue context passed in MQ_CREATE. */
struct mq_context {
	uint32_t word0;
#define lpfc_mq_context_cq_id_SHIFT	22	/* CQ receiving MQ completions */
#define lpfc_mq_context_cq_id_MASK	0x000003FF
#define lpfc_mq_context_cq_id_WORD	word0
#define lpfc_mq_context_count_SHIFT	16	/* encoded queue depth */
#define lpfc_mq_context_count_MASK	0x0000000F
#define lpfc_mq_context_count_WORD	word0
#define LPFC_MQ_CNT_16		0x5
#define LPFC_MQ_CNT_32		0x6
#define LPFC_MQ_CNT_64		0x7
#define LPFC_MQ_CNT_128		0x8
	uint32_t word1;
#define lpfc_mq_context_valid_SHIFT	31
#define lpfc_mq_context_valid_MASK	0x00000001
#define lpfc_mq_context_valid_WORD	word1
	uint32_t reserved2;
	uint32_t reserved3;
};

/* MQ_CREATE: request carries context and page list; response returns
 * the assigned queue id. */
struct lpfc_mbx_mq_create {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_mq_create_num_pages_SHIFT	0
#define lpfc_mbx_mq_create_num_pages_MASK	0x0000FFFF
#define lpfc_mbx_mq_create_num_pages_WORD	word0
			struct mq_context context;
			struct dma_address page[LPFC_MAX_MQ_PAGE];
		} request;
		struct {
			uint32_t word0;
#define lpfc_mbx_mq_create_q_id_SHIFT	0
#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
#define lpfc_mbx_mq_create_q_id_WORD	word0
		} response;
	} u;
};

/* MQ_DESTROY: request names the queue id to tear down. */
struct lpfc_mbx_mq_destroy {
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_mq_destroy_q_id_SHIFT	0
#define lpfc_mbx_mq_destroy_q_id_MASK	0x0000FFFF
#define lpfc_mbx_mq_destroy_q_id_WORD	word0
		} request;
		struct {
			uint32_t word0;
		} response;
	} u;
};
1073
/* POST_HDR_TEMPLATE: posts RPI header template pages at an RPI offset. */
struct lpfc_mbx_post_hdr_tmpl {
	struct mbox_header header;
	uint32_t word10;
#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT	0
#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK	0x0000FFFF
#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD	word10
#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT	16
#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK	0x0000FFFF
#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD	word10
	uint32_t rpi_paddr_lo;	/* template physical address, low dword */
	uint32_t rpi_paddr_hi;	/* template physical address, high dword */
};

/* SLI-4 scatter/gather entry.  Note addr_hi precedes addr_lo, the
 * reverse of the sgl_page_pairs ordering above. */
struct sli4_sge {	/* SLI-4 */
	uint32_t addr_hi;
	uint32_t addr_lo;

	uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT	0 /* Offset of buffer - Not used*/
#define lpfc_sli4_sge_offset_MASK	0x00FFFFFF
#define lpfc_sli4_sge_offset_WORD	word2
#define lpfc_sli4_sge_last_SHIFT	31 /* Last SEG in the SGL sets
						this flag !! */
#define lpfc_sli4_sge_last_MASK		0x00000001
#define lpfc_sli4_sge_last_WORD		word2
	uint32_t word3;
#define lpfc_sli4_sge_len_SHIFT		0
#define lpfc_sli4_sge_len_MASK		0x0001FFFF
#define lpfc_sli4_sge_len_WORD		word3
};
1104
/*
 * FCF (FCoE Forwarder) table record as returned by READ_FCF_TABLE.
 * Each *_SHIFT/*_MASK/*_WORD triplet locates a bit-field in the named
 * dword; the fabric name and MAC address are split one byte per field.
 *
 * Fix: the fcf_avail triplet previously had no matching *_WORD define --
 * its WORD was misspelled "fc_avail", so token-pasting accessors could
 * not resolve lpfc_fcf_record_fcf_avail_WORD.  The correctly spelled
 * define is added; the old name is retained for source compatibility.
 */
struct fcf_record {
	uint32_t max_rcv_size;
	uint32_t fka_adv_period;
	uint32_t fip_priority;
	uint32_t word3;		/* FCF MAC bytes 0-3 */
#define lpfc_fcf_record_mac_0_SHIFT		0
#define lpfc_fcf_record_mac_0_MASK		0x000000FF
#define lpfc_fcf_record_mac_0_WORD		word3
#define lpfc_fcf_record_mac_1_SHIFT		8
#define lpfc_fcf_record_mac_1_MASK		0x000000FF
#define lpfc_fcf_record_mac_1_WORD		word3
#define lpfc_fcf_record_mac_2_SHIFT		16
#define lpfc_fcf_record_mac_2_MASK		0x000000FF
#define lpfc_fcf_record_mac_2_WORD		word3
#define lpfc_fcf_record_mac_3_SHIFT		24
#define lpfc_fcf_record_mac_3_MASK		0x000000FF
#define lpfc_fcf_record_mac_3_WORD		word3
	uint32_t word4;		/* FCF MAC bytes 4-5, availability, provisioning */
#define lpfc_fcf_record_mac_4_SHIFT		0
#define lpfc_fcf_record_mac_4_MASK		0x000000FF
#define lpfc_fcf_record_mac_4_WORD		word4
#define lpfc_fcf_record_mac_5_SHIFT		8
#define lpfc_fcf_record_mac_5_MASK		0x000000FF
#define lpfc_fcf_record_mac_5_WORD		word4
#define lpfc_fcf_record_fcf_avail_SHIFT		16
#define lpfc_fcf_record_fcf_avail_MASK		0x000000FF
#define lpfc_fcf_record_fcf_avail_WORD		word4
/* Misspelled legacy alias of the define above; kept so existing users
 * of the "fc_avail" name keep compiling. */
#define lpfc_fcf_record_fc_avail_WORD		word4
#define lpfc_fcf_record_mac_addr_prov_SHIFT	24
#define lpfc_fcf_record_mac_addr_prov_MASK	0x000000FF
#define lpfc_fcf_record_mac_addr_prov_WORD	word4
#define LPFC_FCF_FPMA		1	/* Fabric Provided MAC Address */
#define LPFC_FCF_SPMA		2	/* Server Provided MAC Address */
	uint32_t word5;		/* fabric name bytes 0-3 */
#define lpfc_fcf_record_fab_name_0_SHIFT	0
#define lpfc_fcf_record_fab_name_0_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_0_WORD		word5
#define lpfc_fcf_record_fab_name_1_SHIFT	8
#define lpfc_fcf_record_fab_name_1_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_1_WORD		word5
#define lpfc_fcf_record_fab_name_2_SHIFT	16
#define lpfc_fcf_record_fab_name_2_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_2_WORD		word5
#define lpfc_fcf_record_fab_name_3_SHIFT	24
#define lpfc_fcf_record_fab_name_3_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_3_WORD		word5
	uint32_t word6;		/* fabric name bytes 4-7 */
#define lpfc_fcf_record_fab_name_4_SHIFT	0
#define lpfc_fcf_record_fab_name_4_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_4_WORD		word6
#define lpfc_fcf_record_fab_name_5_SHIFT	8
#define lpfc_fcf_record_fab_name_5_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_5_WORD		word6
#define lpfc_fcf_record_fab_name_6_SHIFT	16
#define lpfc_fcf_record_fab_name_6_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_6_WORD		word6
#define lpfc_fcf_record_fab_name_7_SHIFT	24
#define lpfc_fcf_record_fab_name_7_MASK		0x000000FF
#define lpfc_fcf_record_fab_name_7_WORD		word6
	uint32_t word7;		/* FC-MAP bytes, record validity */
#define lpfc_fcf_record_fc_map_0_SHIFT		0
#define lpfc_fcf_record_fc_map_0_MASK		0x000000FF
#define lpfc_fcf_record_fc_map_0_WORD		word7
#define lpfc_fcf_record_fc_map_1_SHIFT		8
#define lpfc_fcf_record_fc_map_1_MASK		0x000000FF
#define lpfc_fcf_record_fc_map_1_WORD		word7
#define lpfc_fcf_record_fc_map_2_SHIFT		16
#define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
#define lpfc_fcf_record_fc_map_2_WORD		word7
#define lpfc_fcf_record_fcf_valid_SHIFT		24
#define lpfc_fcf_record_fcf_valid_MASK		0x000000FF
#define lpfc_fcf_record_fcf_valid_WORD		word7
	uint32_t word8;		/* FCF index and state */
#define lpfc_fcf_record_fcf_index_SHIFT		0
#define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
#define lpfc_fcf_record_fcf_index_WORD		word8
#define lpfc_fcf_record_fcf_state_SHIFT		16
#define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
#define lpfc_fcf_record_fcf_state_WORD		word8
	uint8_t vlan_bitmap[512];	/* one bit per VLAN id (4096 VLANs) */
};
1185
/* READ_FCF_TABLE: request selects a table index; response carries the
 * event tag plus (in word11) the next valid index for iteration. */
struct lpfc_mbx_read_fcf_tbl {
	union lpfc_sli4_cfg_shdr cfg_shdr;
	union {
		struct {
			uint32_t word10;
#define lpfc_mbx_read_fcf_tbl_indx_SHIFT	0
#define lpfc_mbx_read_fcf_tbl_indx_MASK		0x0000FFFF
#define lpfc_mbx_read_fcf_tbl_indx_WORD		word10
		} request;
		struct {
			uint32_t eventag;
		} response;
	} u;
	uint32_t word11;
#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT	0
#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK	0x0000FFFF
#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD	word11
};

/* ADD_FCF: FCF record is passed out-of-line via fcf_sge. */
struct lpfc_mbx_add_fcf_tbl_entry {
	union lpfc_sli4_cfg_shdr cfg_shdr;
	uint32_t word10;
#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT		0
#define lpfc_mbx_add_fcf_tbl_fcfi_MASK		0x0000FFFF
#define lpfc_mbx_add_fcf_tbl_fcfi_WORD		word10
	struct lpfc_mbx_sge fcf_sge;
};

/* DELETE_FCF: removes "count" entries starting at "index". */
struct lpfc_mbx_del_fcf_tbl_entry {
	struct mbox_header header;
	uint32_t word10;
#define lpfc_mbx_del_fcf_tbl_count_SHIFT	0
#define lpfc_mbx_del_fcf_tbl_count_MASK		0x0000FFFF
#define lpfc_mbx_del_fcf_tbl_count_WORD		word10
#define lpfc_mbx_del_fcf_tbl_index_SHIFT	16
#define lpfc_mbx_del_fcf_tbl_index_MASK		0x0000FFFF
#define lpfc_mbx_del_fcf_tbl_index_WORD		word10
};
1224
/* Status field for embedded SLI_CONFIG mailbox command */
/* Values reported in the sub-header "status" field of a response. */
#define STATUS_SUCCESS				0x0
#define STATUS_FAILED				0x1
#define STATUS_ILLEGAL_REQUEST			0x2
#define STATUS_ILLEGAL_FIELD			0x3
#define STATUS_INSUFFICIENT_BUFFER		0x4
#define STATUS_UNAUTHORIZED_REQUEST		0x5
#define STATUS_FLASHROM_SAVE_FAILED		0x17
#define STATUS_FLASHROM_RESTORE_FAILED		0x18
#define STATUS_ICCBINDEX_ALLOC_FAILED		0x1a
#define STATUS_IOCTLHANDLE_ALLOC_FAILED		0x1b
#define STATUS_INVALID_PHY_ADDR_FROM_OSM	0x1c
#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM	0x1d
#define STATUS_ASSERT_FAILED			0x1e
#define STATUS_INVALID_SESSION			0x1f
#define STATUS_INVALID_CONNECTION		0x20
#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT	0x21
#define STATUS_BTL_NO_FREE_SLOT_PATH		0x24
#define STATUS_BTL_NO_FREE_SLOT_TGTID		0x25
#define STATUS_OSM_DEVSLOT_NOT_FOUND		0x26
#define STATUS_FLASHROM_READ_FAILED		0x27
#define STATUS_POLL_IOCTL_TIMEOUT		0x28
#define STATUS_ERROR_ACITMAIN			0x2a
#define STATUS_REBOOT_REQUIRED			0x2c
#define STATUS_FCF_IN_USE			0x3a

/* Generic SLI4_CONFIG command: header only; payload follows elsewhere. */
struct lpfc_mbx_sli4_config {
	struct mbox_header header;
};
1254
/* INIT_VFI: initializes a Virtual Fabric Interface bound to an FCFI. */
struct lpfc_mbx_init_vfi {
	uint32_t word1;
#define lpfc_init_vfi_vr_SHIFT		31
#define lpfc_init_vfi_vr_MASK		0x00000001
#define lpfc_init_vfi_vr_WORD		word1
#define lpfc_init_vfi_vt_SHIFT		30
#define lpfc_init_vfi_vt_MASK		0x00000001
#define lpfc_init_vfi_vt_WORD		word1
#define lpfc_init_vfi_vf_SHIFT		29
#define lpfc_init_vfi_vf_MASK		0x00000001
#define lpfc_init_vfi_vf_WORD		word1
#define lpfc_init_vfi_vfi_SHIFT		0
#define lpfc_init_vfi_vfi_MASK		0x0000FFFF
#define lpfc_init_vfi_vfi_WORD		word1
	uint32_t word2;
#define lpfc_init_vfi_fcfi_SHIFT	0
#define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
#define lpfc_init_vfi_fcfi_WORD		word2
	uint32_t word3;
#define lpfc_init_vfi_pri_SHIFT		13
#define lpfc_init_vfi_pri_MASK		0x00000007
#define lpfc_init_vfi_pri_WORD		word3
#define lpfc_init_vfi_vf_id_SHIFT	1
#define lpfc_init_vfi_vf_id_MASK	0x00000FFF
#define lpfc_init_vfi_vf_id_WORD	word3
	uint32_t word4;
#define lpfc_init_vfi_hop_count_SHIFT	24
#define lpfc_init_vfi_hop_count_MASK	0x000000FF
#define lpfc_init_vfi_hop_count_WORD	word4
};

/* REG_VFI: registers a VFI/VPI/FCFI binding; bde points at the
 * service parameters buffer. */
struct lpfc_mbx_reg_vfi {
	uint32_t word1;
#define lpfc_reg_vfi_vp_SHIFT		28
#define lpfc_reg_vfi_vp_MASK		0x00000001
#define lpfc_reg_vfi_vp_WORD		word1
#define lpfc_reg_vfi_vfi_SHIFT		0
#define lpfc_reg_vfi_vfi_MASK		0x0000FFFF
#define lpfc_reg_vfi_vfi_WORD		word1
	uint32_t word2;
#define lpfc_reg_vfi_vpi_SHIFT		16
#define lpfc_reg_vfi_vpi_MASK		0x0000FFFF
#define lpfc_reg_vfi_vpi_WORD		word2
#define lpfc_reg_vfi_fcfi_SHIFT		0
#define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
#define lpfc_reg_vfi_fcfi_WORD		word2
	uint32_t word3_rsvd;
	uint32_t word4_rsvd;
	struct ulp_bde64 bde;	/* NOTE(review): presumably the service
				 * params buffer descriptor -- confirm */
	uint32_t word8_rsvd;
	uint32_t word9_rsvd;
	uint32_t word10;
#define lpfc_reg_vfi_nport_id_SHIFT	0
#define lpfc_reg_vfi_nport_id_MASK	0x00FFFFFF
#define lpfc_reg_vfi_nport_id_WORD	word10
};

/* INIT_VPI: initializes a Virtual Port Index under a VFI. */
struct lpfc_mbx_init_vpi {
	uint32_t word1;
#define lpfc_init_vpi_vfi_SHIFT		16
#define lpfc_init_vpi_vfi_MASK		0x0000FFFF
#define lpfc_init_vpi_vfi_WORD		word1
#define lpfc_init_vpi_vpi_SHIFT		0
#define lpfc_init_vpi_vpi_MASK		0x0000FFFF
#define lpfc_init_vpi_vpi_WORD		word1
};
1321
/*
 * READ_VPI response: N_Port id, arbitrated-loop info, the VPI's MAC
 * address (one byte per field in words 7-8), VLAN tag, and flags.
 *
 * Fix: lpfc_mbx_read_vpi_vv_MASK was written 0x0000001 (seven hex
 * digits); normalized to the 8-digit 0x00000001 used by every other
 * single-bit mask in this file.  Value is unchanged.
 */
struct lpfc_mbx_read_vpi {
	uint32_t word1_rsvd;
	uint32_t word2;
#define lpfc_mbx_read_vpi_vnportid_SHIFT	0
#define lpfc_mbx_read_vpi_vnportid_MASK		0x00FFFFFF
#define lpfc_mbx_read_vpi_vnportid_WORD		word2
	uint32_t word3_rsvd;
	uint32_t word4;		/* acquired/specified ALPA and loop flags */
#define lpfc_mbx_read_vpi_acq_alpa_SHIFT	0
#define lpfc_mbx_read_vpi_acq_alpa_MASK		0x000000FF
#define lpfc_mbx_read_vpi_acq_alpa_WORD		word4
#define lpfc_mbx_read_vpi_pb_SHIFT		15
#define lpfc_mbx_read_vpi_pb_MASK		0x00000001
#define lpfc_mbx_read_vpi_pb_WORD		word4
#define lpfc_mbx_read_vpi_spec_alpa_SHIFT	16
#define lpfc_mbx_read_vpi_spec_alpa_MASK	0x000000FF
#define lpfc_mbx_read_vpi_spec_alpa_WORD	word4
#define lpfc_mbx_read_vpi_ns_SHIFT		30
#define lpfc_mbx_read_vpi_ns_MASK		0x00000001
#define lpfc_mbx_read_vpi_ns_WORD		word4
#define lpfc_mbx_read_vpi_hl_SHIFT		31
#define lpfc_mbx_read_vpi_hl_MASK		0x00000001
#define lpfc_mbx_read_vpi_hl_WORD		word4
	uint32_t word5_rsvd;
	uint32_t word6;
#define lpfc_mbx_read_vpi_vpi_SHIFT		0
#define lpfc_mbx_read_vpi_vpi_MASK		0x0000FFFF
#define lpfc_mbx_read_vpi_vpi_WORD		word6
	uint32_t word7;		/* MAC bytes 0-3 */
#define lpfc_mbx_read_vpi_mac_0_SHIFT		0
#define lpfc_mbx_read_vpi_mac_0_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_0_WORD		word7
#define lpfc_mbx_read_vpi_mac_1_SHIFT		8
#define lpfc_mbx_read_vpi_mac_1_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_1_WORD		word7
#define lpfc_mbx_read_vpi_mac_2_SHIFT		16
#define lpfc_mbx_read_vpi_mac_2_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_2_WORD		word7
#define lpfc_mbx_read_vpi_mac_3_SHIFT		24
#define lpfc_mbx_read_vpi_mac_3_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_3_WORD		word7
	uint32_t word8;		/* MAC bytes 4-5, VLAN tag, vv flag */
#define lpfc_mbx_read_vpi_mac_4_SHIFT		0
#define lpfc_mbx_read_vpi_mac_4_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_4_WORD		word8
#define lpfc_mbx_read_vpi_mac_5_SHIFT		8
#define lpfc_mbx_read_vpi_mac_5_MASK		0x000000FF
#define lpfc_mbx_read_vpi_mac_5_WORD		word8
#define lpfc_mbx_read_vpi_vlan_tag_SHIFT	16
#define lpfc_mbx_read_vpi_vlan_tag_MASK		0x00000FFF
#define lpfc_mbx_read_vpi_vlan_tag_WORD		word8
#define lpfc_mbx_read_vpi_vv_SHIFT		28
#define lpfc_mbx_read_vpi_vv_MASK		0x00000001
#define lpfc_mbx_read_vpi_vv_WORD		word8
};
1377
/* UNREG_VFI: unregisters the VFI named in word2. */
struct lpfc_mbx_unreg_vfi {
	uint32_t word1_rsvd;
	uint32_t word2;
#define lpfc_unreg_vfi_vfi_SHIFT	0
#define lpfc_unreg_vfi_vfi_MASK		0x0000FFFF
#define lpfc_unreg_vfi_vfi_WORD		word2
};

/* RESUME_RPI: resumes I/O on an RPI within the given VPI/VFI. */
struct lpfc_mbx_resume_rpi {
	uint32_t word1;
#define lpfc_resume_rpi_rpi_SHIFT	0
#define lpfc_resume_rpi_rpi_MASK	0x0000FFFF
#define lpfc_resume_rpi_rpi_WORD	word1
	uint32_t event_tag;
	uint32_t word3_rsvd;
	uint32_t word4_rsvd;
	uint32_t word5_rsvd;
	uint32_t word6;
#define lpfc_resume_rpi_vpi_SHIFT	0
#define lpfc_resume_rpi_vpi_MASK	0x0000FFFF
#define lpfc_resume_rpi_vpi_WORD	word6
#define lpfc_resume_rpi_vfi_SHIFT	16
#define lpfc_resume_rpi_vfi_MASK	0x0000FFFF
#define lpfc_resume_rpi_vfi_WORD	word6
};
1403
/* RQ id value meaning "no RQ bound" in REG_FCFI below. */
#define REG_FCF_INVALID_QID	0xFFFF
/*
 * REG_FCFI: registers an FCF and configures up to four RQs with
 * per-RQ type/R_CTL match+mask frame filters (words 4-7), plus MAC
 * addressing mode and VLAN tag (word8).
 */
struct lpfc_mbx_reg_fcfi {
	uint32_t word1;
#define lpfc_reg_fcfi_info_index_SHIFT	0
#define lpfc_reg_fcfi_info_index_MASK	0x0000FFFF
#define lpfc_reg_fcfi_info_index_WORD	word1
#define lpfc_reg_fcfi_fcfi_SHIFT	16
#define lpfc_reg_fcfi_fcfi_MASK		0x0000FFFF
#define lpfc_reg_fcfi_fcfi_WORD		word1
	uint32_t word2;		/* RQ ids 0-1 */
#define lpfc_reg_fcfi_rq_id1_SHIFT	0
#define lpfc_reg_fcfi_rq_id1_MASK	0x0000FFFF
#define lpfc_reg_fcfi_rq_id1_WORD	word2
#define lpfc_reg_fcfi_rq_id0_SHIFT	16
#define lpfc_reg_fcfi_rq_id0_MASK	0x0000FFFF
#define lpfc_reg_fcfi_rq_id0_WORD	word2
	uint32_t word3;		/* RQ ids 2-3 */
#define lpfc_reg_fcfi_rq_id3_SHIFT	0
#define lpfc_reg_fcfi_rq_id3_MASK	0x0000FFFF
#define lpfc_reg_fcfi_rq_id3_WORD	word3
#define lpfc_reg_fcfi_rq_id2_SHIFT	16
#define lpfc_reg_fcfi_rq_id2_MASK	0x0000FFFF
#define lpfc_reg_fcfi_rq_id2_WORD	word3
	uint32_t word4;		/* frame filter for RQ 0 */
#define lpfc_reg_fcfi_type_match0_SHIFT	24
#define lpfc_reg_fcfi_type_match0_MASK	0x000000FF
#define lpfc_reg_fcfi_type_match0_WORD	word4
#define lpfc_reg_fcfi_type_mask0_SHIFT	16
#define lpfc_reg_fcfi_type_mask0_MASK	0x000000FF
#define lpfc_reg_fcfi_type_mask0_WORD	word4
#define lpfc_reg_fcfi_rctl_match0_SHIFT	8
#define lpfc_reg_fcfi_rctl_match0_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_match0_WORD	word4
#define lpfc_reg_fcfi_rctl_mask0_SHIFT	0
#define lpfc_reg_fcfi_rctl_mask0_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_mask0_WORD	word4
	uint32_t word5;		/* frame filter for RQ 1 */
#define lpfc_reg_fcfi_type_match1_SHIFT	24
#define lpfc_reg_fcfi_type_match1_MASK	0x000000FF
#define lpfc_reg_fcfi_type_match1_WORD	word5
#define lpfc_reg_fcfi_type_mask1_SHIFT	16
#define lpfc_reg_fcfi_type_mask1_MASK	0x000000FF
#define lpfc_reg_fcfi_type_mask1_WORD	word5
#define lpfc_reg_fcfi_rctl_match1_SHIFT	8
#define lpfc_reg_fcfi_rctl_match1_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_match1_WORD	word5
#define lpfc_reg_fcfi_rctl_mask1_SHIFT	0
#define lpfc_reg_fcfi_rctl_mask1_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_mask1_WORD	word5
	uint32_t word6;		/* frame filter for RQ 2 */
#define lpfc_reg_fcfi_type_match2_SHIFT	24
#define lpfc_reg_fcfi_type_match2_MASK	0x000000FF
#define lpfc_reg_fcfi_type_match2_WORD	word6
#define lpfc_reg_fcfi_type_mask2_SHIFT	16
#define lpfc_reg_fcfi_type_mask2_MASK	0x000000FF
#define lpfc_reg_fcfi_type_mask2_WORD	word6
#define lpfc_reg_fcfi_rctl_match2_SHIFT	8
#define lpfc_reg_fcfi_rctl_match2_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_match2_WORD	word6
#define lpfc_reg_fcfi_rctl_mask2_SHIFT	0
#define lpfc_reg_fcfi_rctl_mask2_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_mask2_WORD	word6
	uint32_t word7;		/* frame filter for RQ 3 */
#define lpfc_reg_fcfi_type_match3_SHIFT	24
#define lpfc_reg_fcfi_type_match3_MASK	0x000000FF
#define lpfc_reg_fcfi_type_match3_WORD	word7
#define lpfc_reg_fcfi_type_mask3_SHIFT	16
#define lpfc_reg_fcfi_type_mask3_MASK	0x000000FF
#define lpfc_reg_fcfi_type_mask3_WORD	word7
#define lpfc_reg_fcfi_rctl_match3_SHIFT	8
#define lpfc_reg_fcfi_rctl_match3_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_match3_WORD	word7
#define lpfc_reg_fcfi_rctl_mask3_SHIFT	0
#define lpfc_reg_fcfi_rctl_mask3_MASK	0x000000FF
#define lpfc_reg_fcfi_rctl_mask3_WORD	word7
	uint32_t word8;		/* MAC addressing mode, VLAN valid + tag */
#define lpfc_reg_fcfi_mam_SHIFT		13
#define lpfc_reg_fcfi_mam_MASK		0x00000003
#define lpfc_reg_fcfi_mam_WORD		word8
#define LPFC_MAM_BOTH		0	/* Both SPMA and FPMA */
#define LPFC_MAM_SPMA		1	/* Server Provided MAC Address */
#define LPFC_MAM_FPMA		2	/* Fabric Provided MAC Address */
#define lpfc_reg_fcfi_vv_SHIFT		12
#define lpfc_reg_fcfi_vv_MASK		0x00000001
#define lpfc_reg_fcfi_vv_WORD		word8
#define lpfc_reg_fcfi_vlan_tag_SHIFT	0
#define lpfc_reg_fcfi_vlan_tag_MASK	0x00000FFF
#define lpfc_reg_fcfi_vlan_tag_WORD	word8
};

/* UNREG_FCFI: unregisters the FCFI named in word2. */
struct lpfc_mbx_unreg_fcfi {
	uint32_t word1_rsv;
	uint32_t word2;
#define lpfc_unreg_fcfi_SHIFT	0
#define lpfc_unreg_fcfi_MASK	0x0000FFFF
#define lpfc_unreg_fcfi_WORD	word2
};
1501
/* READ_REV response: SLI level, hardware/firmware revisions, firmware
 * names, and the DMA location/length for the VPD data. */
struct lpfc_mbx_read_rev {
	uint32_t word1;
#define lpfc_mbx_rd_rev_sli_lvl_SHIFT		16
#define lpfc_mbx_rd_rev_sli_lvl_MASK		0x0000000F
#define lpfc_mbx_rd_rev_sli_lvl_WORD		word1
#define lpfc_mbx_rd_rev_fcoe_SHIFT		20	/* port runs FCoE */
#define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
#define lpfc_mbx_rd_rev_fcoe_WORD		word1
#define lpfc_mbx_rd_rev_vpd_SHIFT		29	/* VPD data present */
#define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
#define lpfc_mbx_rd_rev_vpd_WORD		word1
	uint32_t first_hw_rev;
	uint32_t second_hw_rev;
	uint32_t word4_rsvd;
	uint32_t third_hw_rev;
	uint32_t word6;		/* FC-PH and feature level ranges */
#define lpfc_mbx_rd_rev_fcph_low_SHIFT		0
#define lpfc_mbx_rd_rev_fcph_low_MASK		0x000000FF
#define lpfc_mbx_rd_rev_fcph_low_WORD		word6
#define lpfc_mbx_rd_rev_fcph_high_SHIFT		8
#define lpfc_mbx_rd_rev_fcph_high_MASK		0x000000FF
#define lpfc_mbx_rd_rev_fcph_high_WORD		word6
#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT	16
#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK	0x000000FF
#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD	word6
#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT	24
#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK	0x000000FF
#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD	word6
	uint32_t word7_rsvd;
	uint32_t fw_id_rev;
	uint8_t  fw_name[16];
	uint32_t ulp_fw_id_rev;
	uint8_t  ulp_fw_name[16];
	uint32_t word18_47_rsvd[30];
	uint32_t word48;
#define lpfc_mbx_rd_rev_avail_len_SHIFT		0
#define lpfc_mbx_rd_rev_avail_len_MASK		0x00FFFFFF
#define lpfc_mbx_rd_rev_avail_len_WORD		word48
	uint32_t vpd_paddr_low;		/* host buffer for VPD, low dword */
	uint32_t vpd_paddr_high;	/* host buffer for VPD, high dword */
	uint32_t avail_vpd_len;
	uint32_t rsvd_52_63[12];
};
1545
/* READ_CONFIG response: link/topology settings, FC timers, and the
 * base+count of each port resource pool (XRI/RPI/VPI/VFI/FCFI) plus
 * queue counts. */
struct lpfc_mbx_read_config {
	uint32_t word1;		/* buffer-to-buffer credit */
#define lpfc_mbx_rd_conf_max_bbc_SHIFT		0
#define lpfc_mbx_rd_conf_max_bbc_MASK		0x000000FF
#define lpfc_mbx_rd_conf_max_bbc_WORD		word1
#define lpfc_mbx_rd_conf_init_bbc_SHIFT		8
#define lpfc_mbx_rd_conf_init_bbc_MASK		0x000000FF
#define lpfc_mbx_rd_conf_init_bbc_WORD		word1
	uint32_t word2;		/* N_Port DID and topology */
#define lpfc_mbx_rd_conf_nport_did_SHIFT	0
#define lpfc_mbx_rd_conf_nport_did_MASK		0x00FFFFFF
#define lpfc_mbx_rd_conf_nport_did_WORD		word2
#define lpfc_mbx_rd_conf_topology_SHIFT		24
#define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD		word2
	uint32_t word3;
#define lpfc_mbx_rd_conf_ao_SHIFT		0
#define lpfc_mbx_rd_conf_ao_MASK		0x00000001
#define lpfc_mbx_rd_conf_ao_WORD		word3
#define lpfc_mbx_rd_conf_bb_scn_SHIFT		8
#define lpfc_mbx_rd_conf_bb_scn_MASK		0x0000000F
#define lpfc_mbx_rd_conf_bb_scn_WORD		word3
#define lpfc_mbx_rd_conf_cbb_scn_SHIFT		12
#define lpfc_mbx_rd_conf_cbb_scn_MASK		0x0000000F
#define lpfc_mbx_rd_conf_cbb_scn_WORD		word3
#define lpfc_mbx_rd_conf_mc_SHIFT		29
#define lpfc_mbx_rd_conf_mc_MASK		0x00000001
#define lpfc_mbx_rd_conf_mc_WORD		word3
	/* Words 4-8: FC timer values (E_D_TOV, LP_TOV, R_A_TOV, R_T_TOV,
	 * AL_TOV). */
	uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
#define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
	uint32_t word5;
#define lpfc_mbx_rd_conf_lp_tov_SHIFT		0
#define lpfc_mbx_rd_conf_lp_tov_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_lp_tov_WORD		word5
	uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
#define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
	uint32_t word7;
#define lpfc_mbx_rd_conf_r_t_tov_SHIFT		0
#define lpfc_mbx_rd_conf_r_t_tov_MASK		0x000000FF
#define lpfc_mbx_rd_conf_r_t_tov_WORD		word7
	uint32_t word8;
#define lpfc_mbx_rd_conf_al_tov_SHIFT		0
#define lpfc_mbx_rd_conf_al_tov_MASK		0x0000000F
#define lpfc_mbx_rd_conf_al_tov_WORD		word8
	uint32_t word9;		/* supported link speed mask */
#define lpfc_mbx_rd_conf_lmt_SHIFT		0
#define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD		word9
	uint32_t word10;
#define lpfc_mbx_rd_conf_max_alpa_SHIFT		0
#define lpfc_mbx_rd_conf_max_alpa_MASK		0x000000FF
#define lpfc_mbx_rd_conf_max_alpa_WORD		word10
	uint32_t word11_rsvd;
	/* Words 12-16: resource pools, each as base (bits 15:0) and
	 * count (bits 31:16). */
	uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT		0
#define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_xri_base_WORD		word12
#define lpfc_mbx_rd_conf_xri_count_SHIFT	16
#define lpfc_mbx_rd_conf_xri_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_xri_count_WORD		word12
	uint32_t word13;
#define lpfc_mbx_rd_conf_rpi_base_SHIFT		0
#define lpfc_mbx_rd_conf_rpi_base_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_rpi_base_WORD		word13
#define lpfc_mbx_rd_conf_rpi_count_SHIFT	16
#define lpfc_mbx_rd_conf_rpi_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_rpi_count_WORD		word13
	uint32_t word14;
#define lpfc_mbx_rd_conf_vpi_base_SHIFT		0
#define lpfc_mbx_rd_conf_vpi_base_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_vpi_base_WORD		word14
#define lpfc_mbx_rd_conf_vpi_count_SHIFT	16
#define lpfc_mbx_rd_conf_vpi_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_vpi_count_WORD		word14
	uint32_t word15;
#define lpfc_mbx_rd_conf_vfi_base_SHIFT		0
#define lpfc_mbx_rd_conf_vfi_base_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_base_WORD		word15
#define lpfc_mbx_rd_conf_vfi_count_SHIFT	16
#define lpfc_mbx_rd_conf_vfi_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD		word15
	uint32_t word16;
#define lpfc_mbx_rd_conf_fcfi_base_SHIFT	0
#define lpfc_mbx_rd_conf_fcfi_base_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_base_WORD		word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
#define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
	/* Words 17-18: maximum queue counts per type. */
	uint32_t word17;
#define lpfc_mbx_rd_conf_rq_count_SHIFT		0
#define lpfc_mbx_rd_conf_rq_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_rq_count_WORD		word17
#define lpfc_mbx_rd_conf_eq_count_SHIFT		16
#define lpfc_mbx_rd_conf_eq_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_eq_count_WORD		word17
	uint32_t word18;
#define lpfc_mbx_rd_conf_wq_count_SHIFT		0
#define lpfc_mbx_rd_conf_wq_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_wq_count_WORD		word18
#define lpfc_mbx_rd_conf_cq_count_SHIFT		16
#define lpfc_mbx_rd_conf_cq_count_MASK		0x0000FFFF
#define lpfc_mbx_rd_conf_cq_count_WORD		word18
};
1653
/*
 * REQUEST_FEATURES mailbox command payload.
 *
 * word2 holds the feature bits the driver requests (rq_*); word3 holds the
 * bits the port grants in its response (rsp_*).  The qry bit in word1
 * presumably selects query-only mode -- confirm against the SLI4 mailbox
 * spec.  All fields follow the <name>_SHIFT/_MASK/_WORD triple convention,
 * presumably consumed by the bf_get()/bf_set() accessor macros defined
 * earlier in this header -- confirm there.
 */
struct lpfc_mbx_request_features {
	uint32_t word1;
#define lpfc_mbx_rq_ftr_qry_SHIFT 0
#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
#define lpfc_mbx_rq_ftr_qry_WORD word1
	uint32_t word2;
#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
	uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
/*
 * Fixed: was lpfc_mbx_rq_ftr_rsp_vf__MASK (double underscore), which broke
 * the name##_MASK token-paste that the bf_get()/bf_set() accessors rely on,
 * leaving the rsp_vf bit unreadable through them.  Check the tree for any
 * direct users of the old misspelled name before merging.
 */
#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
};
1710
/* Mailbox Completion Queue Error Messages */
/* Status codes reported in the mailbox completion queue entry (see the
 * lpfc_mcqe status field below).
 */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
/* NOTE(review): "CEQ" below breaks the MB_CQE_ prefix used by the other
 * codes -- looks like a typo, but the name may already be referenced
 * elsewhere in the driver; confirm before renaming.
 */
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
/* mailbox queue entry structure */
/*
 * word0 packs the command opcode and (on completion) the status; the
 * remaining words are a union of every SLI4 mailbox command payload, or
 * raw words for commands without a dedicated layout.
 */
struct lpfc_mqe {
	uint32_t word0;
#define lpfc_mqe_status_SHIFT 16
#define lpfc_mqe_status_MASK 0x0000FFFF
#define lpfc_mqe_status_WORD word0
#define lpfc_mqe_command_SHIFT 8
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
	union {
		uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
		/* sli4 mailbox commands */
		struct lpfc_mbx_sli4_config sli4_config;
		struct lpfc_mbx_init_vfi init_vfi;
		struct lpfc_mbx_reg_vfi reg_vfi;
		/* NOTE(review): unreg_vfi reuses the reg_vfi layout --
		 * confirm this is intentional. */
		struct lpfc_mbx_reg_vfi unreg_vfi;
		struct lpfc_mbx_init_vpi init_vpi;
		struct lpfc_mbx_resume_rpi resume_rpi;
		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
		struct lpfc_mbx_reg_fcfi reg_fcfi;
		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
		struct lpfc_mbx_mq_create mq_create;
		struct lpfc_mbx_eq_create eq_create;
		struct lpfc_mbx_cq_create cq_create;
		struct lpfc_mbx_wq_create wq_create;
		struct lpfc_mbx_rq_create rq_create;
		struct lpfc_mbx_mq_destroy mq_destroy;
		struct lpfc_mbx_eq_destroy eq_destroy;
		struct lpfc_mbx_cq_destroy cq_destroy;
		struct lpfc_mbx_wq_destroy wq_destroy;
		struct lpfc_mbx_rq_destroy rq_destroy;
		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
		struct lpfc_mbx_nembed_cmd nembed_cmd;
		struct lpfc_mbx_read_rev read_rev;
		struct lpfc_mbx_read_vpi read_vpi;
		struct lpfc_mbx_read_config rd_config;
		struct lpfc_mbx_request_features req_ftrs;
		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
		struct lpfc_mbx_nop nop;
	} un;
};
1762
/*
 * Mailbox completion queue entry.  word0 carries the MB_CQE_STATUS_*
 * code plus an extended status; the trailer word holds the valid /
 * async / completed / consumed flags and, presumably for async events,
 * the LPFC_TRAILER_CODE_* event class in the code field -- confirm
 * against the completion handling in lpfc_sli.c.
 */
struct lpfc_mcqe {
	uint32_t word0;
#define lpfc_mcqe_status_SHIFT 0
#define lpfc_mcqe_status_MASK 0x0000FFFF
#define lpfc_mcqe_status_WORD word0
#define lpfc_mcqe_ext_status_SHIFT 16
#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
#define lpfc_mcqe_ext_status_WORD word0
	uint32_t mcqe_tag0;
	uint32_t mcqe_tag1;
	uint32_t trailer;
#define lpfc_trailer_valid_SHIFT 31
#define lpfc_trailer_valid_MASK 0x00000001
#define lpfc_trailer_valid_WORD trailer
#define lpfc_trailer_async_SHIFT 30
#define lpfc_trailer_async_MASK 0x00000001
#define lpfc_trailer_async_WORD trailer
#define lpfc_trailer_hpi_SHIFT 29
#define lpfc_trailer_hpi_MASK 0x00000001
#define lpfc_trailer_hpi_WORD trailer
#define lpfc_trailer_completed_SHIFT 28
#define lpfc_trailer_completed_MASK 0x00000001
#define lpfc_trailer_completed_WORD trailer
#define lpfc_trailer_consumed_SHIFT 27
#define lpfc_trailer_consumed_MASK 0x00000001
#define lpfc_trailer_consumed_WORD trailer
#define lpfc_trailer_type_SHIFT 16
#define lpfc_trailer_type_MASK 0x000000FF
#define lpfc_trailer_type_WORD trailer
#define lpfc_trailer_code_SHIFT 8
#define lpfc_trailer_code_MASK 0x000000FF
#define lpfc_trailer_code_WORD trailer
#define LPFC_TRAILER_CODE_LINK 0x1
#define LPFC_TRAILER_CODE_FCOE 0x2
#define LPFC_TRAILER_CODE_DCBX 0x3
};
1799
/*
 * Async completion queue entry for link events (presumably trailer code
 * LPFC_TRAILER_CODE_LINK -- confirm in the async event dispatch).  word0
 * packs speed, duplex, link status and the physical port; word1 carries
 * the fault code.
 */
struct lpfc_acqe_link {
	uint32_t word0;
#define lpfc_acqe_link_speed_SHIFT 24
#define lpfc_acqe_link_speed_MASK 0x000000FF
#define lpfc_acqe_link_speed_WORD word0
#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
#define lpfc_acqe_link_duplex_SHIFT 16
#define lpfc_acqe_link_duplex_MASK 0x000000FF
#define lpfc_acqe_link_duplex_WORD word0
#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
#define lpfc_acqe_link_status_SHIFT 8
#define lpfc_acqe_link_status_MASK 0x000000FF
#define lpfc_acqe_link_status_WORD word0
#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
#define LPFC_ASYNC_LINK_STATUS_UP 0x1
#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
#define lpfc_acqe_link_physical_SHIFT 0
#define lpfc_acqe_link_physical_MASK 0x000000FF
#define lpfc_acqe_link_physical_WORD word0
#define LPFC_ASYNC_LINK_PORT_A 0x0
#define LPFC_ASYNC_LINK_PORT_B 0x1
	uint32_t word1;
#define lpfc_acqe_link_fault_SHIFT 0
#define lpfc_acqe_link_fault_MASK 0x000000FF
#define lpfc_acqe_link_fault_WORD word1
#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
	uint32_t event_tag;
	uint32_t trailer;
};
1838
/*
 * Async completion queue entry for FCoE/FCF events: the affected FCF
 * table index, the event type (new FCF, table full, FCF dead) and the
 * current FCF count.
 */
struct lpfc_acqe_fcoe {
	uint32_t fcf_index;
	uint32_t word1;
#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
#define lpfc_acqe_fcoe_fcf_count_WORD word1
#define lpfc_acqe_fcoe_event_type_SHIFT 16
#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
#define lpfc_acqe_fcoe_event_type_WORD word1
#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
	uint32_t event_tag;
	uint32_t trailer;
};
1854
/*
 * Async completion queue entry for DCBX events; only a TLV TTL word is
 * defined beyond the standard event_tag/trailer.
 */
struct lpfc_acqe_dcbx {
	uint32_t tlv_ttl;
	uint32_t reserved;
	uint32_t event_tag;
	uint32_t trailer;
};
1861
/*
 * Define the bootstrap mailbox (bmbx) region used to communicate
 * mailbox command between the host and port. The mailbox consists
 * of a payload area of 256 bytes and a completion queue of length
 * 16 bytes.
 */
struct lpfc_bmbx_create {
	struct lpfc_mqe mqe;	/* command payload (struct lpfc_mqe above) */
	struct lpfc_mcqe mcqe;	/* completion entry (struct lpfc_mcqe above) */
};
1872
#define SGL_ALIGN_SZ 64		/* SGL address alignment, in bytes */
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
/* NOTE(review): the comment above appears orphaned -- it describes an
 * alignment adjustment that is not defined here; confirm original intent. */
#define NO_XRI ((uint16_t)-1)	/* all-ones sentinel: no XRI assigned */
/*
 * Common fields occupying words 6-11 of every work queue entry; each
 * command-specific *_wqe struct below embeds this after its own words
 * 0-5.  Fields are addressed through the <name>_SHIFT/_MASK/_WORD
 * triples, presumably via the driver's bf_get()/bf_set() accessor
 * macros -- confirm in the header preamble.
 */
struct wqe_common {
	uint32_t word6;
#define wqe_xri_SHIFT 0
#define wqe_xri_MASK 0x0000FFFF
#define wqe_xri_WORD word6
#define wqe_ctxt_tag_SHIFT 16
#define wqe_ctxt_tag_MASK 0x0000FFFF
#define wqe_ctxt_tag_WORD word6
	uint32_t word7;
#define wqe_ct_SHIFT 2
#define wqe_ct_MASK 0x00000003
#define wqe_ct_WORD word7
#define wqe_status_SHIFT 4
#define wqe_status_MASK 0x0000000f
#define wqe_status_WORD word7
#define wqe_cmnd_SHIFT 8
#define wqe_cmnd_MASK 0x000000ff
#define wqe_cmnd_WORD word7
#define wqe_class_SHIFT 16
#define wqe_class_MASK 0x00000007
#define wqe_class_WORD word7
#define wqe_pu_SHIFT 20
#define wqe_pu_MASK 0x00000003
#define wqe_pu_WORD word7
#define wqe_erp_SHIFT 22
#define wqe_erp_MASK 0x00000001
#define wqe_erp_WORD word7
#define wqe_lnk_SHIFT 23
#define wqe_lnk_MASK 0x00000001
#define wqe_lnk_WORD word7
#define wqe_tmo_SHIFT 24
#define wqe_tmo_MASK 0x000000ff
#define wqe_tmo_WORD word7
	uint32_t abort_tag; /* word 8 in WQE */
	uint32_t word9;
#define wqe_reqtag_SHIFT 0
#define wqe_reqtag_MASK 0x0000FFFF
#define wqe_reqtag_WORD word9
#define wqe_rcvoxid_SHIFT 16
#define wqe_rcvoxid_MASK 0x0000FFFF
#define wqe_rcvoxid_WORD word9
	uint32_t word10;
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
#define wqe_pv_SHIFT 19
#define wqe_pv_MASK 0x00000001
#define wqe_pv_WORD word10
#define wqe_xc_SHIFT 21
#define wqe_xc_MASK 0x00000001
#define wqe_xc_WORD word10
#define wqe_ccpe_SHIFT 23
#define wqe_ccpe_MASK 0x00000001
#define wqe_ccpe_WORD word10
#define wqe_ccp_SHIFT 24
#define wqe_ccp_MASK 0x000000ff
#define wqe_ccp_WORD word10
	uint32_t word11;
#define wqe_cmd_type_SHIFT 0
#define wqe_cmd_type_MASK 0x0000000f
#define wqe_cmd_type_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
#define wqe_cqid_SHIFT 16
#define wqe_cqid_MASK 0x000003ff
#define wqe_cqid_WORD word11
};
1945
/*
 * WQE word 5: 24-bit destination ID plus the ar/xo bits used by the
 * XMIT_BLS_RSP64 WQE.
 */
struct wqe_did {
	uint32_t word5;
#define wqe_els_did_SHIFT 0
#define wqe_els_did_MASK 0x00FFFFFF
#define wqe_els_did_WORD word5
#define wqe_xmit_bls_ar_SHIFT 30
#define wqe_xmit_bls_ar_MASK 0x00000001
#define wqe_xmit_bls_ar_WORD word5
#define wqe_xmit_bls_xo_SHIFT 31
#define wqe_xmit_bls_xo_MASK 0x00000001
#define wqe_xmit_bls_xo_WORD word5
};
1958
/*
 * ELS_REQUEST64 work queue entry.  word4 packs the 24-bit source ID with
 * the sp/vf bits; word12 carries the VF id and priority, word13 the ELS
 * hop count.
 */
struct els_request64_wqe {
	struct ulp_bde64 bde;
	uint32_t payload_len;
	uint32_t word4;
#define els_req64_sid_SHIFT 0
#define els_req64_sid_MASK 0x00FFFFFF
#define els_req64_sid_WORD word4
#define els_req64_sp_SHIFT 24
#define els_req64_sp_MASK 0x00000001
#define els_req64_sp_WORD word4
#define els_req64_vf_SHIFT 25
#define els_req64_vf_MASK 0x00000001
#define els_req64_vf_WORD word4
	struct wqe_did wqe_dest;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t word12;
#define els_req64_vfid_SHIFT 1
#define els_req64_vfid_MASK 0x00000FFF
#define els_req64_vfid_WORD word12
#define els_req64_pri_SHIFT 13
#define els_req64_pri_MASK 0x00000007
#define els_req64_pri_WORD word12
	uint32_t word13;
#define els_req64_hopcnt_SHIFT 24
#define els_req64_hopcnt_MASK 0x000000ff
#define els_req64_hopcnt_WORD word13
	uint32_t reserved[2];
};
1987
/* XMIT_ELS_RSP64 work queue entry: response BDE, destination and the
 * common words 6-11; words 3-4 and 12-15 are reserved.
 */
struct xmit_els_rsp64_wqe {
	struct ulp_bde64 bde;
	uint32_t rsvd3;
	uint32_t rsvd4;
	struct wqe_did wqe_dest;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4];
};
1996
/*
 * XMIT_BLS_RSP64 work queue entry.  word1 carries the RX_ID/OX_ID of
 * the exchange being answered; word2 the low/high sequence counts.
 */
struct xmit_bls_rsp64_wqe {
	uint32_t payload0;
	uint32_t word1;
#define xmit_bls_rsp64_rxid_SHIFT 0
#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
#define xmit_bls_rsp64_rxid_WORD word1
#define xmit_bls_rsp64_oxid_SHIFT 16
#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
#define xmit_bls_rsp64_oxid_WORD word1
	uint32_t word2;
#define xmit_bls_rsp64_seqcntlo_SHIFT 0
#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
#define xmit_bls_rsp64_seqcntlo_WORD word2
#define xmit_bls_rsp64_seqcnthi_SHIFT 16
#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
#define xmit_bls_rsp64_seqcnthi_WORD word2
	uint32_t rsrvd3;
	uint32_t rsrvd4;
	struct wqe_did wqe_dest;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4];
};
/*
 * WQE word 5 layout shared by the xmit_seq64, xmit_bcast64 and
 * gen_req64 WQEs below.  The dfctl/type/rctl field names suggest the FC
 * frame header DF_CTL/TYPE/R_CTL bytes -- confirm against the SLI4 spec.
 *
 * Mask literals were written with nine hex digits (e.g. 0x000000001);
 * they are normalized here to the eight-digit 32-bit form used
 * everywhere else in this header.  Values are unchanged.
 */
struct wqe_rctl_dfctl {
	uint32_t word5;
#define wqe_si_SHIFT 2
#define wqe_si_MASK 0x00000001
#define wqe_si_WORD word5
#define wqe_la_SHIFT 3
#define wqe_la_MASK 0x00000001
#define wqe_la_WORD word5
#define wqe_ls_SHIFT 7
#define wqe_ls_MASK 0x00000001
#define wqe_ls_WORD word5
#define wqe_dfctl_SHIFT 8
#define wqe_dfctl_MASK 0x000000ff
#define wqe_dfctl_WORD word5
#define wqe_type_SHIFT 16
#define wqe_type_MASK 0x000000ff
#define wqe_type_WORD word5
#define wqe_rctl_SHIFT 24
#define wqe_rctl_MASK 0x000000ff
#define wqe_rctl_WORD word5
};
2040
/*
 * XMIT_SEQUENCE64 work queue entry.
 *
 * NOTE(review): "paylaod_offset" is a misspelling of payload_offset and
 * "wge_ctl" of wqe_ctl; both are left as-is because renaming struct
 * members would break existing references -- fix tree-wide in a
 * dedicated patch.
 */
struct xmit_seq64_wqe {
	struct ulp_bde64 bde;
	uint32_t paylaod_offset;
	uint32_t relative_offset;
	struct wqe_rctl_dfctl wge_ctl;
	struct wqe_common wqe_com; /* words 6-11 */
	/* Note: word10 different REVISIT */
	uint32_t xmit_len;
	uint32_t rsvd_12_15[3];
};
/*
 * XMIT_BCAST64 work queue entry.
 *
 * NOTE(review): "paylaod_len" is a misspelling of payload_len and
 * "wge_ctl" of wqe_ctl; left as-is because renaming members breaks
 * existing references -- fix tree-wide in a dedicated patch.
 */
struct xmit_bcast64_wqe {
	struct ulp_bde64 bde;
	uint32_t paylaod_len;
	uint32_t rsvd4;
	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4];
};
2059
/*
 * GEN_REQUEST64 work queue entry: command/payload lengths, the word-5
 * rctl/dfctl fields and the common words 6-11.
 *
 * NOTE(review): "wge_ctl" is presumably a misspelling of wqe_ctl; left
 * as-is to avoid breaking existing references.
 */
struct gen_req64_wqe {
	struct ulp_bde64 bde;
	uint32_t command_len;
	uint32_t payload_len;
	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4];
};
2068
/* CREATE_XRI work queue entry: only the destination (word 5) and the
 * common words 6-11 are defined; all other words are reserved.
 */
struct create_xri_wqe {
	uint32_t rsrvd[5];           /* words 0-4 */
	struct wqe_did	wqe_dest;  /* word 5 */
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4];         /* word 12-15 */
};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x000000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x0000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
/*
 * FCP_IWRITE64 work queue entry: payload BDE, payload length, total and
 * initial transfer lengths, then the common words 6-11.
 */
struct fcp_iwrite64_wqe {
	struct ulp_bde64 bde;
	uint32_t payload_len;
	uint32_t total_xfer_len;
	uint32_t initial_xfer_len;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4]; /* word 12-15 */
};
2102
/*
 * FCP_IREAD64 work queue entry: payload BDE, payload and total transfer
 * lengths, then the common words 6-11.
 */
struct fcp_iread64_wqe {
	struct ulp_bde64 bde;
	uint32_t payload_len; /* word 3 */
	uint32_t total_xfer_len; /* word 4 */
	uint32_t rsrvd5; /* word 5 */
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4]; /* word 12-15 */
};
2111
/*
 * FCP_ICMND64 work queue entry: command BDE plus the common words 6-11;
 * words 3-5 and 12-15 are reserved.
 */
struct fcp_icmnd64_wqe {
	struct ulp_bde64 bde; /* words 0-2 */
	uint32_t rsrvd[3]; /* words 3-5 */
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t rsvd_12_15[4]; /* word 12-15 */
};
2118
2119
/*
 * A work queue entry: 16 words (64 bytes) overlaid with every
 * command-specific WQE layout defined above.
 */
union lpfc_wqe {
	uint32_t words[16];
	struct lpfc_wqe_generic generic;
	struct fcp_icmnd64_wqe fcp_icmd;
	struct fcp_iread64_wqe fcp_iread;
	struct fcp_iwrite64_wqe fcp_iwrite;
	struct abort_cmd_wqe abort_cmd;
	struct create_xri_wqe create_xri;
	struct xmit_bcast64_wqe xmit_bcast64;
	struct xmit_seq64_wqe xmit_sequence;
	struct xmit_bls_rsp64_wqe xmit_bls_rsp;
	struct xmit_els_rsp64_wqe xmit_els_rsp;
	struct els_request64_wqe els_req;
	struct gen_req64_wqe gen_req;
};
2135
/* WQE command-type values -- presumably written to the wqe_cmd_type
 * field of WQE word 11 (see struct wqe_common); confirm against the
 * WQE build code in lpfc_sli.c.
 */
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
2141
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d8..2f5907f92eea 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return a zero when finished or we got a
225 * mailbox error, either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 483 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 484 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 487 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 488 return -EIO;
466 } 489 }
467 } 490 }
468 491
492 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 493 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 495
472 /* Enable appropriate host interrupts */ 496 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 497 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 499 if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 594{
572 struct lpfc_vport **vports; 595 struct lpfc_vport **vports;
573 int i; 596 int i;
574 /* Disable interrupts */ 597
575 writel(0, phba->HCregaddr); 598 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
577 603
578 if (phba->pport->load_flag & FC_UNLOADING) 604 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 605 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 606 else {
581 vports = lpfc_create_vport_work_array(phba); 607 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 608 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 611 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 612 lpfc_destroy_vport_work_array(phba, vports);
586 } 613 }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 615}
589 616
590/** 617/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 619 * @phba: pointer to lpfc HBA data structure.
593 * 620 *
594 * This routine will do uninitialization after the HBA is reset when bring 621 * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 625
599 * Any other value - error. 626 * Any other value - error.
600 **/ 627 **/
601int 628static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 630{
604 struct lpfc_sli *psli = &phba->sli; 631 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 632 struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 669
643 return 0; 670 return 0;
644} 671}
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
676 * This routine will do uninitialization after the HBA is reset when bring
677 * down the SLI Layer.
678 *
679 * Return codes
680 * 0 - success.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
700 /* scsi_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
732 * uninitialization after the HBA is reset when bring down the SLI Layer.
733 *
734 * Return codes
735 * 0 - success.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
645 743
646/** 744/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 907 "taking this port offline.\n");
810 908
811 spin_lock_irq(&phba->hbalock); 909 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 911 spin_unlock_irq(&phba->hbalock);
814 912
815 lpfc_offline_prep(phba); 913 lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 932 struct lpfc_sli *psli = &phba->sli;
835 933
836 spin_lock_irq(&phba->hbalock); 934 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 936 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 937 lpfc_offline_prep(phba);
840 938
841 lpfc_offline(phba); 939 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 944 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 945 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 946 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 949}
850 950
851/** 951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
970/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 972 * @phba: pointer to lpfc hba data structure.
854 * 973 *
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 983 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 984 struct lpfc_sli *psli = &phba->sli;
866 985
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 997 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 998 "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1000 phba->work_status[0], phba->work_status[1]);
872 1001
873 spin_lock_irq(&phba->hbalock); 1002 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1004 spin_unlock_irq(&phba->hbalock);
876 1005
877 1006
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1039 phba->work_hs = old_host_status & ~HS_FFER1;
911 1040
1041 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1046}
916 1047
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
917/** 1063/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1065 * @phba: pointer to lpfc hba data structure.
920 * 1066 *
921 * This routine is invoked to handle the following HBA hardware error 1067 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1070 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1071 * 3 - Mailbox command came back as unknown
926 **/ 1072 **/
927void 1073static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1075{
930 struct lpfc_vport *vport = phba->pport; 1076 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1077 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1080 unsigned long temperature;
935 struct temp_event temp_event_data; 1081 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1082 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1083
939 /* If the pci channel is offline, ignore possible errors, 1084 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1085 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
942 return; 1091 return;
1092 }
1093
943 /* If resets are disabled then leave the HBA alone and return */ 1094 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1095 if (!phba->cfg_enable_hba_reset)
945 return; 1096 return;
946 1097
947 /* Send an internal error event to mgmt application */ 1098 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1099 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1100
956 if (phba->hba_flag & DEFER_ERATT) 1101 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1102 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1110 phba->work_status[0], phba->work_status[1]);
966 1111
967 spin_lock_irq(&phba->hbalock); 1112 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1114 spin_unlock_irq(&phba->hbalock);
970 1115
971 /* 1116 /*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1182}
1038 1183
1039/** 1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
1211 * specified yet, just treated it as adaptor hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1231 * routine from the API jump table function pointer from the lpfc_hba struct.
1232 *
1233 * Return codes
1234 * 0 - sucess.
1235 * Any other value - error.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
1243/**
1040 * lpfc_handle_latt - The HBA link event handler 1244 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1245 * @phba: pointer to lpfc hba data structure.
1042 * 1246 *
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1341 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1342 * 1 - success
1139 **/ 1343 **/
1140static int 1344int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1346{
1143 uint8_t lenlo, lenhi; 1347 uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1496 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1497 int max_speed;
1294 int GE = 0; 1498 int GE = 0;
1499 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1500 struct {
1296 char * name; 1501 char * name;
1297 int max_speed; 1502 int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1642 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1644 break;
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1440 default: 1653 default:
1441 m = (typeof(m)){ NULL }; 1654 m = (typeof(m)){ NULL };
1442 break; 1655 break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1657
1445 if (mdp && mdp[0] == '\0') 1658 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1659 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1660 /* oneConnect hba requires special processing, they are all initiators
1448 snprintf(descp, 255, 1661 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1662 */
1450 m.name, m.max_speed, 1663 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1664 if (oneConnect)
1452 m.bus, 1665 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
1454} 1678}
1455 1679
1456/** 1680/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1758 icmd->ulpLe = 1;
1535 1759
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1763 kfree(mp1);
1539 cnt++; 1764 cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1986 * Lets wait for this to happen, if needed.
1762 */ 1987 */
1763 while (!list_empty(&vport->fc_nodes)) { 1988 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1989 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1991 "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2006 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2007 msleep(10);
1784 } 2008 }
1785 return;
1786} 2009}
1787 2010
1788/** 2011/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2026}
1804 2027
1805/** 2028/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2030 * @phba: pointer to lpfc hba data structure.
1808 * 2031 *
1809 * This routine stops all the timers associated with a HBA. This function is 2032 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2033 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2034 **/
1812static void 2035void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2037{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2038 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2039 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2040 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
2051 /* Stop any OneConnect device sepcific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
1822 return; 2059 return;
1823} 2060}
1824 2061
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2115 return 1;
1879 } 2116 }
1880 2117
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2118 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
1884 } 2128 }
1885 2129
1886 vports = lpfc_create_vport_work_array(phba); 2130 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2131 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2133 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2134 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2135 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2191 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2192 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2193 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2195 struct Scsi_Host *shost;
1952 2196
1953 if (vports[i]->load_flag & FC_UNLOADING) 2197 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2198 continue;
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2200 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2201 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2202 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2220 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2221 lpfc_destroy_vport_work_array(phba, vports);
1977 2222
1978 lpfc_sli_flush_mbox_queue(phba); 2223 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2224}
1980 2225
1981/** 2226/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2242 return;
1998 2243
1999 /* stop all timers associated with this hba */ 2244 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2245 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2246 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2247 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2249 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2250 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2258 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2259 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2260 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2262 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2263 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2264 vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2351 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2352 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2353 shost->max_cmd_len = 16;
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
2109 2358
2110 /* 2359 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2360 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2372
2124 /* Initialize all internally managed lists. */ 2373 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2376 spin_lock_init(&vport->work_port_lock);
2127 2377
2128 init_timer(&vport->fc_disctmo); 2378 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2564}
2315 2565
2316/** 2566/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine is invoked to stop an SLI3 device port, it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
2573 **/
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
2576{
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2583
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
2588
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port, it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
2607
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2613 * the API jump table function pointer from the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
2620
2621/**
2622 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2637
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2642 return;
2643 }
2644
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2650 /*
2651 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2652 * supports multiple FCF indices.
2653 */
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
2658
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 }
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
2675 }
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2678}
2679
2680/**
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2684 *
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
2690 **/
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2694{
2695 uint16_t latt_fault;
2696
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
2711}
2712
2713/**
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2717 *
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2720 *
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
2726{
2727 uint8_t att_type;
2728
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
2747 }
2748 return att_type;
2749}
2750
2751/**
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2755 *
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
2764{
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835 /* Block ELS IOCBs until we have done process link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855 /* Fake the the following irrelvant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 return;
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_link: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected fron network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
2942 * Currently, driver support only one FCF - so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event. DCBX event
 * handling is not implemented yet; the event is logged at error level so
 * the firmware notification is not silently dropped.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
3062}
3063
3064/**
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine it invoked to log the currently used active interrupt mode
3070 * to the device.
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
3095/**
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3099 * This routine is invoked to enable the PCI device that is common to all
3100 * PCI devices.
3101 *
3102 * Return codes
3103 * 0 - sucessful
3104 * other values - error
3105 **/
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108{
3109 struct pci_dev *pdev;
3110 int bars;
3111
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
3129
3130 return 0;
3131
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
3136}
3137
3138/**
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3142 * This routine is invoked to disable the PCI device that is common to all
3143 * PCI devices.
3144 **/
3145static void
3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147{
3148 struct pci_dev *pdev;
3149 int bars;
3150
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3163
3164 return;
3165}
3166
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands. If HBA resets are administratively
 * disabled, the adapter is simply marked in error state instead.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	/* Quiesce, restart and resume the board, in this order */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
3190
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to: driver timers, the host
 * attention mask, module-parameter driven sizing of the scatter-gather DMA
 * buffer pool, SLI layer setup and driver memory pool allocation.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer (SLI3 only; SLI4 setup omits it) */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 */
	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

	/* BlockGuard needs room for protection-data BDEs as well */
	if (phba->cfg_enable_bg) {
		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
		phba->cfg_sg_dma_buf_size +=
			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
	}

	/* Also reinitialize the host templates with new values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	return 0;
}
3275
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Release every driver memory pool the SLI-3 setup path allocated */
	lpfc_mem_free_all(phba);
}
3291
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to: driver timers, configuration
 * parameters, scatter-gather sizing, slow-path work queues, the bootstrap
 * mailbox, SLI4 queues, sgl/rpi bookkeeping and the fast-path EQ handle and
 * MSI-X vector arrays. On any failure the already-acquired resources are
 * unwound in reverse order through the goto cleanup ladder at the bottom.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;
	int i, hbq_count;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/*
	 * NOTE(review): we need to do a READ_CONFIG mailbox command here
	 * before calling lpfc_get_cfgparam. For VFs this will report the
	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
	 * All of the resources allocated
	 * for this Port are tied to these values.
	 */
	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 * To ensure that the scsi sgl does not cross a 4k page boundary only
	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
	 * Table of sgl sizes and seg_cnt:
	 * sgl size, 	sg_seg_cnt	total seg
	 * 1k		50		52
	 * 2k		114		116
	 * 4k		242		244
	 * 8k		498		500
	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
	 */
	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
		phba->cfg_sg_seg_cnt = 50;
	else if (phba->cfg_sg_seg_cnt <= 114)
		phba->cfg_sg_seg_cnt = 114;
	else if (phba->cfg_sg_seg_cnt <= 242)
		phba->cfg_sg_seg_cnt = 242;
	else
		phba->cfg_sg_seg_cnt = 498;

	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
					+ sizeof(struct fcp_rsp);
	phba->cfg_sg_dma_buf_size +=
		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Perform a function reset */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_destroy_queue;

	/* Initialize and populate the iocb list per host */
	rc = lpfc_init_sgl_list(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1400 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_free_sgl_list;
	}

	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Per-EQ handles for the fast-path FCP event queues */
	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
				    phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for fast-path "
				"per-EQ handle array\n");
		goto out_remove_rpi_hdrs;
	}

	/* MSI-X vector table sized by the configured EQ count */
	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
	if (!phba->sli4_hba.msix_entries) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2573 Failed allocate memory for msi-x "
				"interrupt vector entries\n");
		goto out_free_fcp_eq_hdl;
	}

	return rc;

out_free_fcp_eq_hdl:
	kfree(phba->sli4_hba.fcp_eq_hdl);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_free_sgl_list:
	lpfc_free_sgl_list(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
3519
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to. The teardown
 * order is roughly the reverse of lpfc_sli4_driver_resource_setup().
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* unregister default FCFI from the HBA */
	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);

	/* Free the default FCR table */
	lpfc_sli_remove_dflt_fcf(phba);

	/* Free memory allocated for msi-x interrupt vector entries */
	kfree(phba->sli4_hba.msix_entries);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.fcp_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_sgl_list(phba);

	/* Free the SCSI sgl management array */
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);

	/* Free the SLI4 queues */
	lpfc_sli4_queue_destroy(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free the bsmbx (bootstrap mailbox) region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list)
		kfree(conn_entry);

	return;
}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api fucntion jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
3611
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to:
 * locks, list heads and wait queues shared by all SLI revisions.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	return 0;
}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it attached to.
3664 *
3665 * Return codes
3666 * 0 - sucessful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3680 }
3681
3682 return 0;
3683}
3684
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread started in phase2 setup */
	kthread_stop(phba->worker_thread);
}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
3717 }
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initizlize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - sucessful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748 __func__, i, LPFC_IOCB_LIST_CNT);
3749 goto out_free_iocbq;
3750 }
3751
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3754 kfree(iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
3758 }
3759 iocbq_entry->sli4_xritag = NO_XRI;
3760
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
3765 }
3766
3767 return 0;
3768
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
3771
3772 return -ENOMEM;
3773}
3774
3775/**
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3778 *
3779 * This routine is invoked to free the driver's sgl list and memory.
3780 **/
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3783{
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
3787
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3791
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3803 }
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
3806
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3826}
3827
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now: only the tracking array itself is
 * freed here; the sglq entries it points at are owned by the sgl list.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
3841
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly. ELS XRIs are
 * carved out first; the remaining XRIs are reserved for SCSI buffers.
 * On failure all partially-allocated resources are released and -ENOMEM
 * is returned.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 lpfc_init_sgl_list els %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check on XRI management: ELS must not consume every XRI */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
			GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Keep the SCSI XRI into the XRI management array */
	phba->sli4_hba.scsi_xri_max =
			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;

	phba->sli4_hba.lpfc_scsi_psb_array =
			kzalloc((sizeof(struct lpfc_scsi_buf *) *
			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	/* Allocate one sglq entry (with its DMA buffer) per ELS XRI */
	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (sglq_entry->sli4_xritag == NO_XRI) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate XRI.\n"
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registration */
		spin_lock_irq(&phba->hbalock);
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE modulo 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
 * 0 - successful
 * ENOMEM - No available memory
3966 * EIO - The mailbox failed to complete successfully.
3967 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	int longs;
	uint16_t rpi_count;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * Provision an rpi bitmask range for discovery. The total count
	 * is the difference between max and base + 1.
	 * NOTE(review): the expression below computes rpi_base + max_rpi - 1,
	 * which matches the rpi_limit calculation in
	 * lpfc_sli4_create_rpi_hdr but not this comment's formula —
	 * confirm which is intended.
	 */
	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	/* One bit per rpi, rounded up to a whole number of longs. */
	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!phba->sli4_hba.rpi_bmask)
		return -ENOMEM;

	/* Post the first rpi header page; failure tears everything down. */
	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
4001
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/* Highest rpi value this port may hand out (inclusive). */
	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	/* Snapshot next_rpi under the lock; it can advance concurrently. */
	spin_lock_irq(&phba->hbalock);
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	/* The port requires the region to be naturally (4K) aligned. */
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	/* Publish the header and bump next_rpi atomically w.r.t. hbalock. */
	spin_lock_irq(&phba->hbalock);
	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next modulo-64 rpi value to post
	 * in any subsequent rpi memory region postings.
	 */
	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
2318 * @phba: pointer to lpfc hba data structure. 4090 * @phba: pointer to lpfc hba data structure.
2319 * 4091 *
2320 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel 4092 * This routine is invoked to remove all memory resources allocated
2321 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that 4093 * to support rpis. This routine presumes the caller has released all
2322 * pci_enable_msix(), once invoked, enables either all or nothing, depending 4094 * rpis consumed by fabric or port logins and is prepared to have
2323 * on the current availability of PCI vector resources. The device driver is 4095 * the header pages removed.
2324 * responsible for calling the individual request_irq() to register each MSI-X 4096 **/
2325 * vector with a interrupt handler, which is done in this function. Note that 4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
 * pointer to @phba - successful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4136 "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
 * 0 - successful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4202
4203 return 0;
4204}
4205
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244 while (pagecnt) {
4245 spin_lock_init(&_dump_buf_lock);
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
 * 0 - successful
4342 * other values - error
4343 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask: prefer 64-bit, fall back to 32-bit. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			return error;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	/* Carve the SLIM2 region into mailbox, PCB and IOCB sub-areas. */
	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	/* Partition the HBQ slim among the HBQs, one run of entries each. */
	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	/* Register addresses are fixed offsets into the BAR2 mapping. */
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

	/* Error unwinding: release resources in reverse acquisition order. */
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4478 *
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
4481 *
4482 * Return 0 if successful, otherwise -ENODEV.
4483 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
	uint32_t onlnreg0, onlnreg1;
	int i, port_error = -ENODEV;

	/* BAR1 CSR map must have been set up before POST can be polled. */
	if (!phba->sli4_hba.STAregaddr)
		return -ENODEV;

	/* With unrecoverable error, log the error message and return error */
	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1422 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"online0_reg=0x%x, online1_reg=0x%x\n",
					uerrlo_reg.word0, uerrhi_reg.word0,
					onlnreg0, onlnreg1);
		}
		return -ENODEV;
	}

	/* Wait up to 30 seconds (3000 x 10ms) for SLI Port POST done/ready */
	for (i = 0; i < 3000; i++) {
		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
		/* Encounter fatal POST error, break out */
		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_ARMFW_READY ==
		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
			port_error = 0;
			break;
		}
		msleep(10);
	}

	/* Decode the host-state register fields on failure or timeout. */
	if (port_error)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Failure HBA POST Status: sta_reg=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
			bf_get(lpfc_hst_state_perr, &sta_reg),
			bf_get(lpfc_hst_state_sfi, &sta_reg),
			bf_get(lpfc_hst_state_nip, &sta_reg),
			bf_get(lpfc_hst_state_ipc, &sta_reg),
			bf_get(lpfc_hst_state_xrom, &sta_reg),
			bf_get(lpfc_hst_state_dl, &sta_reg),
			bf_get(lpfc_hst_state_port_status, &sta_reg));

	/* Log device information */
	scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
			"FeatureL1=0x%x, FeatureL2=0x%x\n",
			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
			bf_get(lpfc_scratchpad_slirev, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));

	return port_error;
}
4552
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
4574
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
4585
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
4595}
4596
4597/**
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4601 *
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
4606 **/
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4609{
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
4612
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
4624}
4625
4626/**
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4629 *
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
4636 *
4637 * Return codes
 * 0 - successful
 * ENOMEM - could not allocate memory.
4640 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	/* High word carries physical address bits 63:34. */
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	/* Low word carries bits 33:4; bits 3:0 are zero by 16-byte alignment. */
	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
4703
4704/**
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4707 *
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
4715static void
4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4717{
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4725}
4726
4727/**
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4730 *
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4734 * allocation for the port.
4735 *
4736 * Return codes
 * 0 - successful
 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4740 **/
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
4743{
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
4747
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
4754 }
4755
4756 lpfc_read_config(phba, pmb);
4757
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4819 }
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
4827}
4828
4829/**
4830 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
 * 0 - successful
 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4840 **/
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
4843{
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4848
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
4856
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
4874}
4875
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * This routine only allocates the queue descriptors; the queues are made
 * known to the port later by lpfc_sli4_queue_setup().
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory (all failure paths unwind and
 *	          return -ENOMEM)
 **/
static int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters: the requested count
	 * must leave LPFC_SP_WQN_DEF WQs for the slow path and still meet
	 * the fast-path minimum.
	 */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters: same scheme as the
	 * WQ check above, reserving LPFC_SP_EQN_DEF EQs for the slow path.
	 */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The number of FCP EQs (%d) is more "
				"than the number of FCP WQs (%d), take "
				"the number of FCP EQs same as than of "
				"WQs (%d)\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/* Create fast-path FCP Event Queue(s) */
	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
			       phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for fast-path "
				"EQ record array\n");
		goto out_free_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/* Create slow-path Unsolicited Receive Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0502 Failed allocate slow-path USOL RX CQ\n");
		goto out_free_els_cq;
	}
	phba->sli4_hba.rxq_cq = qdesc;

	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_rxq_cq;
	}
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	}

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

	/*
	 * Error unwind: the labels below free queues in the reverse order of
	 * allocation and each label falls through to the next, so jumping to
	 * any label releases everything allocated before that point.  The
	 * fcp_* loops start at --idx because idx is the first slot that was
	 * NOT successfully allocated.
	 */
out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_rxq_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}
5185
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queue descriptors
 * allocated by lpfc_sli4_queue_create().  Queues are freed in roughly
 * the reverse order of their creation and every record pointer is
 * NULLed so a repeated call is harmless.
 *
 * This function returns nothing.
 **/
static void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release unsolicited receive complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}
5253
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post to the port all the SLI4 queues that
 * were previously allocated by lpfc_sli4_queue_create(): slow-path and
 * fast-path EQs first, then CQs bound to those EQs, then the MQ/WQs
 * bound to the CQs, and finally the unsolicited receive queue pair.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - a required queue was never allocated
 *	otherwise - error code returned by the failing lpfc_*_create()
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path Unsolicited Receive Complete Queue */
	if (!phba->sli4_hba.rxq_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0532 USOL RX CQ not allocated\n");
		goto out_destroy_els_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
			    LPFC_RCQ, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0533 Failed setup of slow-path USOL RX CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.rxq_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path FCP Response Complete Queue, one per fast-path
	 * EQ (one-to-one CQ/EQ pairing).
	 */
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			goto out_destroy_fcp_cq;
		}
		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
				    phba->sli4_hba.fp_eq[fcp_cqidx],
				    LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent eq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
	}

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		goto out_destroy_fcp_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Set up fast-path FCP Work Queue */
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_cq_index],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
		/* Round robin FCP Work Queue's Completion Queue assignment */
		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
	}

	/*
	 * Set up the unsolicited Receive Queue (RQ) pair; both the header
	 * and data RQs post completions to the rxq_cq.
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.rxq_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.rxq_cq->queue_id);
	return 0;

	/*
	 * Error unwind: tear down in reverse order of setup; each label
	 * falls through to the next.  Note that out_destroy_fcp_cq also
	 * destroys rxq_cq (there is no separate label for it).
	 */
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
	return rc;
}
5519
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down (unregister from the port) all
 * the SLI4 queues that were set up by lpfc_sli4_queue_setup().  The
 * teardown order is the reverse of the setup order: work queues first,
 * then completion queues, then event queues.  The queue descriptors
 * themselves are not freed here; that is done by
 * lpfc_sli4_queue_destroy().
 *
 * This function returns nothing.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset unsolicited receive complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
	/* Unset FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	/* Unset fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
	/* Unset slow-path event queue */
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * CQE. For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - sucessful
5576 * -ENOMEM - No availble memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that, it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.  Caller is expected to hold
 * phba->hbalock (see lpfc_sli4_cq_event_alloc for the locking wrapper).
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise (pool empty).
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	/* list_remove_head leaves cq_event NULL when the pool is empty */
	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
5639
/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.  It takes phba->hbalock
 * (irq-safe) around the lock free allocator.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise (pool empty).
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}
5661
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.  Caller is expected to
 * hold phba->hbalock (see lpfc_sli4_cq_event_release for the wrapper).
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
5676
/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.  It takes phba->hbalock
 * (irq-safe) around the lock free release.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
5694
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the pending completion-queue events back into
 * the free pool for device reset.  The pending work-queue lists are
 * spliced onto a private list under hbalock, then drained outside the
 * lock (each release re-takes the lock per event).
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It will destroys
5733 * all resources assigned to the PCI function which originates this request.
5734 *
5735 * Return codes
5736 * 0 - sucessful
5737 * ENOMEM - No availble memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
5776
/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox command and
 * wait for each command to complete.  Commands are issued by polling when
 * interrupts are not yet enabled, otherwise synchronously with a timeout.
 *
 * Return: the number of NOP mailbox command completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	/* The single mailbox is reused for every iteration */
	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	/* On MBX_TIMEOUT the port may still own the mailbox, so it must
	 * not be returned to the pool here.
	 */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}
5846
/**
 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
 * @phba: pointer to lpfc hba data structure.
 * @fcfi: fcf index.
 *
 * This routine is invoked to unregister a FCFI from the device.
 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI is no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
5890
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They are actually
5922 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
5982
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with a interrupt handler, which is done in this function. Note that
2326 * later when device is unloading, the driver should always call free_irq() 6022 * later when device is unloading, the driver should always call free_irq()
2327 * on all MSI-X vectors it has done request_irq() on before calling 6023 * on all MSI-X vectors it has done request_irq() on before calling
2328 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2333 * other values - error 6029 * other values - error
2334 **/ 6030 **/
2335static int 6031static int
2336lpfc_enable_msix(struct lpfc_hba *phba) 6032lpfc_sli_enable_msix(struct lpfc_hba *phba)
2337{ 6033{
2338 int rc, i; 6034 int rc, i;
2339 LPFC_MBOXQ_t *pmb; 6035 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350 "0420 PCI enable MSI-X failed (%d)\n", rc); 6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351 goto msi_fail_out; 6047 goto msi_fail_out;
2352 } else 6048 }
2353 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355 "0477 MSI-X entry[%d]: vector=x%x " 6051 "0477 MSI-X entry[%d]: vector=x%x "
2356 "message=%d\n", i, 6052 "message=%d\n", i,
2357 phba->msix_entries[i].vector, 6053 phba->msix_entries[i].vector,
2358 phba->msix_entries[i].entry); 6054 phba->msix_entries[i].entry);
2359 /* 6055 /*
2360 * Assign MSI-X vectors to interrupt handlers 6056 * Assign MSI-X vectors to interrupt handlers
2361 */ 6057 */
2362 6058
2363 /* vector-0 is associated to slow-path handler */ 6059 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 6060 rc = request_irq(phba->msix_entries[0].vector,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366 if (rc) { 6063 if (rc) {
2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368 "0421 MSI-X slow-path request_irq failed " 6065 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2371 } 6068 }
2372 6069
2373 /* vector-1 is associated to fast-path handler */ 6070 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 6071 rc = request_irq(phba->msix_entries[1].vector,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376 6074
2377 if (rc) { 6075 if (rc) {
2378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2401 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402 "0351 Config MSI mailbox command failed, " 6100 "0351 Config MSI mailbox command failed, "
2403 "mbxCmd x%x, mbxStatus x%x\n", 6101 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
2405 goto mbx_fail_out; 6103 goto mbx_fail_out;
2406 } 6104 }
2407 6105
@@ -2428,14 +6126,14 @@ msi_fail_out:
2428} 6126}
2429 6127
2430/** 6128/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode 6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
2432 * @phba: pointer to lpfc hba data structure. 6130 * @phba: pointer to lpfc hba data structure.
2433 * 6131 *
2434 * This routine is invoked to release the MSI-X vectors and then disable the 6132 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode. 6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
2436 **/ 6134 **/
2437static void 6135static void
2438lpfc_disable_msix(struct lpfc_hba *phba) 6136lpfc_sli_disable_msix(struct lpfc_hba *phba)
2439{ 6137{
2440 int i; 6138 int i;
2441 6139
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2444 free_irq(phba->msix_entries[i].vector, phba); 6142 free_irq(phba->msix_entries[i].vector, phba);
2445 /* Disable MSI-X */ 6143 /* Disable MSI-X */
2446 pci_disable_msix(phba->pcidev); 6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
2447} 6147}
2448 6148
2449/** 6149/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode 6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
2451 * @phba: pointer to lpfc hba data structure. 6151 * @phba: pointer to lpfc hba data structure.
2452 * 6152 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel 6153 * This routine is invoked to enable the MSI interrupt mode to device with
2454 * function pci_enable_msi() is called to enable the MSI vector. The 6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
2455 * device driver is responsible for calling the request_irq() to register 6155 * enable the MSI vector. The device driver is responsible for calling the
2456 * MSI vector with a interrupt the handler, which is done in this function. 6156 * request_irq() to register MSI vector with a interrupt the handler, which
6157 * is done in this function.
2457 * 6158 *
2458 * Return codes 6159 * Return codes
2459 * 0 - sucessful 6160 * 0 - sucessful
2460 * other values - error 6161 * other values - error
2461 */ 6162 */
2462static int 6163static int
2463lpfc_enable_msi(struct lpfc_hba *phba) 6164lpfc_sli_enable_msi(struct lpfc_hba *phba)
2464{ 6165{
2465 int rc; 6166 int rc;
2466 6167
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2474 return rc; 6175 return rc;
2475 } 6176 }
2476 6177
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
2478 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479 if (rc) { 6180 if (rc) {
2480 pci_disable_msi(phba->pcidev); 6181 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2485} 6186}
2486 6187
2487/** 6188/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode 6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
2489 * @phba: pointer to lpfc hba data structure. 6190 * @phba: pointer to lpfc hba data structure.
2490 * 6191 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver 6192 * This routine is invoked to disable the MSI interrupt mode to device with
2492 * calls free_irq() on MSI vector it has done request_irq() on before 6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
2494 * a device will be left with MSI enabled and leaks its vector. 6195 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6196 * its vector.
2495 */ 6197 */
2496
2497static void 6198static void
2498lpfc_disable_msi(struct lpfc_hba *phba) 6199lpfc_sli_disable_msi(struct lpfc_hba *phba)
2499{ 6200{
2500 free_irq(phba->pcidev->irq, phba); 6201 free_irq(phba->pcidev->irq, phba);
2501 pci_disable_msi(phba->pcidev); 6202 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
2503} 6204}
2504 6205
2505/** 6206/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode 6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
2507 * @phba: pointer to lpfc hba data structure. 6208 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
2509 * 6209 *
2510 * This routine it invoked to log the currently used active interrupt mode 6210 * This routine is invoked to enable device interrupt and associate driver's
2511 * to the device. 6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
2512 */ 6212 * spec. Depends on the interrupt mode configured to the driver, the driver
6213 * will try to fallback from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6215 * of:
6216 * MSI-X -> MSI -> IRQ.
6217 *
6218 * Return codes
6219 * 0 - sucessful
6220 * other values - error
6221 **/
6222static uint32_t
6223lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6224{
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6226 int retval;
6227
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6231 if (!retval) {
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6234 if (!retval) {
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6237 intr_mode = 2;
6238 }
6239 }
6240 }
6241
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6245 if (!retval) {
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6248 intr_mode = 1;
6249 }
6250 }
6251
6252 /* Fallback to INTx if both MSI-X/MSI initalization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6256 if (!retval) {
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6259 intr_mode = 0;
6260 }
6261 }
6262 return intr_mode;
6263}
6264
6265/**
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6268 *
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6273 **/
2513static void 6274static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6275lpfc_sli_disable_intr(struct lpfc_hba *phba)
2515{ 6276{
2516 switch (intr_mode) { 6277 /* Disable the currently initialized interrupt mode */
2517 case 0: 6278 if (phba->intr_type == MSIX)
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6279 lpfc_sli_disable_msix(phba);
2519 "0470 Enable INTx interrupt mode.\n"); 6280 else if (phba->intr_type == MSI)
2520 break; 6281 lpfc_sli_disable_msi(phba);
2521 case 1: 6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6284
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6288
6289 return;
6290}
6291
6292/**
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with a interrupt
6302 * handler, which is done in this function. Note that later when device is
6303 * unloading, the driver should always call free_irq() on all MSI-X vectors
6304 * it has done request_irq() on before calling pci_disable_msix(). Failure
6305 * to do so results in a BUG_ON() and a device will be left with MSI-X
6306 * enabled and leaks its vectors.
6307 *
6308 * Return codes
6309 * 0 - sucessful
6310 * other values - error
6311 **/
6312static int
6313lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6314{
6315 int rc, index;
6316
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6320
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6324 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n"); 6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
2524 break; 6327 goto msi_fail_out;
2525 case 2: 6328 }
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n"); 6332 "0489 MSI-X entry[%d]: vector=x%x "
2528 break; 6333 "message=%d\n", index,
2529 default: 6334 phba->sli4_hba.msix_entries[index].vector,
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6335 phba->sli4_hba.msix_entries[index].entry);
2531 "0482 Illegal interrupt mode.\n"); 6336 /*
2532 break; 6337 * Assign MSI-X vectors to interrupt handlers
6338 */
6339
6340 /* The first vector must associated to slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6347 "(%d)\n", rc);
6348 goto msi_fail_out;
2533 } 6349 }
2534 return; 6350
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6359 if (rc) {
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6363 goto cfg_fail_out;
6364 }
6365 }
6366
6367 return rc;
6368
6369cfg_fail_out:
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6374
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6377
6378msi_fail_out:
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6381 return rc;
2535} 6382}
2536 6383
6384/**
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6387 *
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6390 **/
2537static void 6391static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6392lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6393{
2540 /* Clear all interrupt enable conditions */ 6394 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6395
2547 /* Reset some HBA SLI setup states */ 6396 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6398
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6402 /* Disable MSI-X */
6403 pci_disable_msix(phba->pcidev);
6404
6405 return;
6406}
6407
6408/**
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6411 *
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
6415 * the request_irq() to register MSI vector with a interrupt the handler,
6416 * which is done in this function.
6417 *
6418 * Return codes
6419 * 0 - sucessful
6420 * other values - error
6421 **/
6422static int
6423lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424{
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451}
2550 6452
6453/**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6460 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6461 * its vector.
6462 **/
6463static void
6464lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465{
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
2551 return; 6468 return;
2552} 6469}
2553 6470
2554/** 6471/**
2555 * lpfc_enable_intr - Enable device interrupt 6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6473 * @phba: pointer to lpfc hba data structure.
2557 * 6474 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6475 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depends on the interrupt 6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured to the driver, the driver will try to fallback from the 6477 * interface spec. Depends on the interrupt mode configured to the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6478 * the driver will try to fallback from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6479 * interrupt mode which is supported by the platform, kernel, and device in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
2563 * 6482 *
2564 * Return codes 6483 * Return codes
2565 * 0 - sucessful 6484 * 0 - sucessful
2566 * other values - error 6485 * other values - error
2567 **/ 6486 **/
2568static uint32_t 6487static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6488lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6489{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6490 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6491 int retval, index;
2573 6492
2574 if (cfg_mode == 2) { 6493 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6494 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6495 retval = 0;
2577 if (!retval) { 6496 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6497 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6498 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6499 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6500 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6501 phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6506
2588 /* Fallback to MSI if MSI-X initialization failed */ 6507 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6509 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6510 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6511 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6512 phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6516
2598 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6517 /* Fallback to INTx if both MSI-X/MSI initalization failed */
2599 if (phba->intr_type == NONE) { 6518 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6521 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6522 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6523 phba->intr_type = INTx;
2605 intr_mode = 0; 6524 intr_mode = 0;
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6526 index++) {
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529 }
2606 } 6530 }
2607 } 6531 }
2608 return intr_mode; 6532 return intr_mode;
2609} 6533}
2610 6534
2611/** 6535/**
2612 * lpfc_disable_intr - Disable device interrupt 6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6537 * @phba: pointer to lpfc hba data structure.
2614 * 6538 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6539 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6542 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6543 **/
2620static void 6544static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6545lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6546{
2623 /* Disable the currently initialized interrupt mode */ 6547 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6548 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6549 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6550 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6551 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6552 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6553 free_irq(phba->pcidev->irq, phba);
2630 6554
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6560}
2637 6561
2638/** 6562/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6564 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6565 *
2651 * Return code 6566 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6567 * a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6568 **/
2655static int __devinit 6569static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6570lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6571{
2658 struct lpfc_vport *vport = NULL; 6572 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676 6574
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 6575 spin_lock_irq(shost->host_lock);
2678 if (!phba) 6576 vport->load_flag |= FC_UNLOADING;
2679 goto out_release_regions; 6577 spin_unlock_irq(shost->host_lock);
2680 6578
2681 atomic_set(&phba->fast_event_count, 0); 6579 lpfc_stop_hba_timers(phba);
2682 spin_lock_init(&phba->hbalock);
2683 6580
2684 /* Initialize ndlp management spinlock */ 6581 phba->pport->work_port_events = 0;
2685 spin_lock_init(&phba->ndlp_lock);
2686 6582
2687 phba->pcidev = pdev; 6583 lpfc_sli_hba_down(phba);
2688 6584
2689 /* Assign an unused board number */ 6585 lpfc_sli_brdrestart(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6586
2693 INIT_LIST_HEAD(&phba->port_list); 6587 lpfc_sli_disable_intr(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6588
2702 /* Initialize timers used by driver */ 6589 return;
2703 init_timer(&phba->hb_tmofunc); 6590}
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6591
2707 psli = &phba->sli; 6592/**
2708 init_timer(&psli->mbox_tmo); 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2709 psli->mbox_tmo.function = lpfc_mbox_timeout; 6594 * @phba: pointer to lpfc hba data structure.
2710 psli->mbox_tmo.data = (unsigned long) phba; 6595 *
2711 init_timer(&phba->fcp_poll_timer); 6596 * This routine is invoked to unset the HBA device initialization steps to
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout; 6597 * a device with SLI-4 interface spec.
2713 phba->fcp_poll_timer.data = (unsigned long) phba; 6598 **/
2714 init_timer(&phba->fabric_block_timer); 6599static void
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
2716 phba->fabric_block_timer.data = (unsigned long) phba; 6601{
2717 init_timer(&phba->eratt_poll); 6602 struct lpfc_vport *vport = phba->pport;
2718 phba->eratt_poll.function = lpfc_poll_eratt; 6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6604
2721 pci_set_master(pdev); 6605 spin_lock_irq(shost->host_lock);
2722 pci_save_state(pdev); 6606 vport->load_flag |= FC_UNLOADING;
2723 pci_try_set_mwi(pdev); 6607 spin_unlock_irq(shost->host_lock);
2724 6608
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6609 phba->pport->work_port_events = 0;
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727 goto out_idr_remove;
2728 6610
2729 /* 6611 lpfc_sli4_hba_down(phba);
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6612
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6613 lpfc_sli4_disable_intr(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6614
2739 /* Map HBA SLIM to a kernel virtual address. */ 6615 return;
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6616}
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747
2748 /* Map HBA Control Registers to a kernel virtual address. */
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6617
2757 /* Allocate memory for SLI-2 structures */ 6618/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6620 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6621 *
2761 GFP_KERNEL); 6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6623 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
2764 6633
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6634 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6635 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6636
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6637 /*
2772 lpfc_sli_hbq_size(), 6638 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6639 * mailbox command.
2774 GFP_KERNEL); 6640 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6641
2778 hbq_count = lpfc_sli_hbq_count(); 6642 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6643 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6645 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6646 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
2785 } 6651 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6652 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6654 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6655 mboxq = phba->sli.mbox_active;
2790 6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6657 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6659 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6660 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6661 }
2802 6662
2803 /* Initialize and populate the iocb list per host. */ 6663 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6664 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6665
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6666 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6667 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6668
2825 spin_lock_irq(&phba->hbalock); 6669 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6670 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6671
2831 /* Initialize HBA structure */ 6672 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6673 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6674}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6675
2837 INIT_LIST_HEAD(&phba->work_list); 6676/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver state that it can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
2840 6700
2841 /* Initialize the wait queue head for the kernel thread */ 6701 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
2843 6705
2844 /* Startup the kernel thread for this host adapter. */ 6706 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6707 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6708 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6710 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6711 goto out_free_phba;
2850 } 6712 }
2851 6713
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6716 if (error)
6717 goto out_disable_pci_dev;
2855 6718
2856 /* Initialize list of fabric iocbs */ 6719 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
2858 6726
2859 /* Initialize list to save ELS buffers */ 6727 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
2861 6734
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6736 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
2865 6742
2866 shost = lpfc_shost_from_vport(vport); 6743 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
2869 6750
2870 pci_set_drvdata(pdev, shost); 6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
2871 6758
2872 phba->MBslimaddr = phba->slim_memmap_p; 6759 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6760 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6761 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
2877 6766
2878 /* Configure sysfs attributes */ 6767 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6772 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6773 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6774 }
2885 6775
6776 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6777 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6781 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6783 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6787 goto out_free_sysfs_attr;
2894 } 6788 }
2895 /* HBA SLI setup */ 6789 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6790 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6792 "1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6796
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6797 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6798 msleep(50);
2905 /* Check active interrupts received */ 6799 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6802 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6803 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6804 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6805 break;
2911 } else { 6806 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6808 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6809 "failed active interrupt test.\n",
2915 intr_mode); 6810 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6811 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6812 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6813 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6814 cfg_mode = --intr_mode;
2929 } 6815 }
2930 } 6816 }
2931 6817
2932 /* 6818 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6819 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6820
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6821 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6822 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6823
3025 return 0; 6824 return 0;
3026 6825
3027out_remove_device: 6826out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6827 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6828out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6829 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6830out_destroy_shost:
3039 destroy_port(vport); 6831 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6832out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6833 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6834out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6835 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6836out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6837 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6838out_unset_pci_mem_s3:
3047 } 6839 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6840out_disable_pci_dev:
3049out_free_hbqslimp: 6841 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6842out_free_phba:
3062 kfree(phba); 6843 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6844 return error;
3072} 6845}
3073 6846
3074/** 6847/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6849 * @pdev: pointer to PCI device
3077 * 6850 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6851 * This routine is to be called to disattach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
3081 **/ 6855 **/
3082static void __devexit 6856static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6858{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6872 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6873 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6874 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6876 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6877 lpfc_destroy_vport_work_array(phba, vports);
3104 6878
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6894 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6895 lpfc_sli_brdrestart(phba);
3122 6896
3123 lpfc_stop_phba_timers(phba); 6897 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6898 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6899 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6900 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6902 lpfc_debugfs_terminate(vport);
3129 6903
3130 /* Disable interrupt */ 6904 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6905 lpfc_sli_disable_intr(phba);
3132 6906
3133 pci_set_drvdata(pdev, NULL); 6907 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6908 scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6912 * corresponding pools here.
3139 */ 6913 */
3140 lpfc_scsi_free(phba); 6914 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6915 lpfc_mem_free_all(phba);
3142 6916
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6925 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6926 iounmap(phba->slim_memmap_p);
3153 6927
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6928 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6929
3158 pci_release_selected_regions(pdev, bars); 6930 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6931 pci_disable_device(pdev);
3160} 6932}
3161 6933
3162/** 6934/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6936 * @pdev: pointer to PCI device
3165 * @msg: power management message 6937 * @msg: power management message
3166 * 6938 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6939 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6940 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6941 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bring the device offline. Note that as the 6942 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6943 * and bring the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6944 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6946 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6947 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
3177 * 6950 *
3178 * Return code 6951 * Return code
3179 * 0 - driver suspended the device 6952 * 0 - driver suspended the device
3180 * Error otherwise 6953 * Error otherwise
3181 **/ 6954 **/
3182static int 6955static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6957{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6967 kthread_stop(phba->worker_thread);
3195 6968
3196 /* Disable interrupt from device */ 6969 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6970 lpfc_sli_disable_intr(phba);
3198 6971
3199 /* Save device state to PCI config space */ 6972 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6973 pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6977}
3205 6978
3206/** 6979/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6981 * @pdev: pointer to PCI device
3209 * 6982 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6983 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6985 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6986 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6987 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6990 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
3219 * 6993 *
3220 * Return code 6994 * Return code
3221 * 0 - driver suspended the device 6995 * 0 - driver suspended the device
3222 * Error otherwise 6996 * Error otherwise
3223 **/ 6997 **/
3224static int 6998static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 7000{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7024 }
3251 7025
3252 /* Configure and enable interrupt */ 7026 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7028 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7030 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7043}
3270 7044
3271/** 7045/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7047 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7048 * @state: the current PCI connection state.
3275 * 7049 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7050 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7051 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7052 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7053 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7054 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
3282 * 7057 *
3283 * Return codes 7058 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7061 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7062static pci_ers_result_t
3288 pci_channel_state_t state) 7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7064{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7087 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7088
3314 /* Disable interrupt */ 7089 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7090 lpfc_sli_disable_intr(phba);
3316 7091
3317 /* Request a slot reset. */ 7092 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7093 return PCI_ERS_RESULT_NEED_RESET;
3319} 7094}
3320 7095
3321/** 7096/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7098 * @pdev: pointer to PCI device.
3324 * 7099 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7100 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7101 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7103 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7105 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7106 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
3333 * 7109 *
3334 * Return codes 7110 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7113 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7116{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7131 pci_set_master(pdev);
3355 7132
3356 spin_lock_irq(&phba->hbalock); 7133 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7135 spin_unlock_irq(&phba->hbalock);
3359 7136
3360 /* Configure and enable interrupt */ 7137 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7139 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7141 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7155}
3379 7156
3380/** 7157/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7159 * @pdev: pointer to PCI device
3383 * 7160 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7161 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
3388 */ 7166 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7172
7173 lpfc_online(phba);
7174}
7175
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
7197}
7198
7199/**
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
7204 * This routine is called from the kernel's PCI subsystem to device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7207 * information of the device and driver to see if the driver state that it
7208 * can support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
7224 int mcnt;
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7236 goto out_free_phba;
7237 }
7238
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7241 if (error)
7242 goto out_disable_pci_dev;
7243
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7258 }
7259
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7291 }
7292
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7300 }
7301
7302 /* Now, trying to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7312 error = -ENODEV;
7313 goto out_free_sysfs_attr;
7314 }
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7319 error = -ENODEV;
7320 goto out_disable_intr;
7321 }
7322
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
7335 }
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
7340 /* Unset the preivous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7344 }
7345
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7348
7349 return 0;
7350
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7367out_free_phba:
7368 lpfc_hba_free(phba);
7369 return error;
7370}
7371
7372/**
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7375 *
7376 * This routine is called from the kernel's PCI subsystem to device with
7377 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
7380 **/
7381static void __devexit
7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7383{
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7388 int i;
7389
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7394
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7397
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7408
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7411
7412 /*
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7416 */
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7419
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7426 */
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7429
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7432
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7436
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7439
7440 return;
7441}
7442
7443/**
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
7448 * This routine is called from the kernel's PCI subsystem to support system
7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bring
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
7458 * according to the @msg provided by the PM.
7459 *
7460 * Return code
7461 * 0 - driver suspended the device
7462 * Error otherwise
7463 **/
7464static int
7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7466{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7491 *
7492 * This routine is called from the kernel's PCI subsystem to support system
7493 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7501 * state.
7502 *
7503 * Return code
7504 * 0 - driver suspended the device
7505 * Error otherwise
7506 **/
7507static int
7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7512 uint32_t intr_mode;
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7546 lpfc_online(phba);
7547
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
7551 return 0;
7552}
7553
7554/**
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7558 *
7559 * This routine is called from the PCI subsystem for error handling to device
7560 * with SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7565 *
7566 * Return codes
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7569 **/
7570static pci_ers_result_t
7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7572{
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
7577 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7579 *
7580 * This routine is called from the PCI subsystem for error handling to device
7581 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7589 *
7590 * Return codes
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7593 */
7594static pci_ers_result_t
7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7596{
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7602 * @pdev: pointer to PCI device
7603 *
7604 * This routine is called from the PCI subsystem for error handling to device
7605 * with SLI-4 interface spec. It is called when kernel error recovery tells
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
7609 **/
7610static void
7611lpfc_io_resume_s4(struct pci_dev *pdev)
7612{
7613 return;
7614}
7615
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7623 * at PCI device-specific information of the device and driver to see if the
7624 * driver state that it can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
7734 * 0 - driver suspended the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
7799 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7946 { 0 }
3473}; 7947};
3474 7948
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7960 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7961 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7962 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7963 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7964 .err_handler = &lpfc_err_handler,
3491}; 7965};
3492 7966
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b012..954ba57970a3 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc2127..b9b451c09010 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
80
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
1362 * This routine put the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine put the completed mailbox command into the mailbox command 1378 * This routine put the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
1330/** 1395/**
1396 * lpfc_mbox_cmd_check - Check the validality of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
1401 * This check will be performed by both the mailbox issue API when a client
1402 * is to issue a mailbox command to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
1409 /* Mailbox command that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
1430 * mailbox command. It is used by the mailbox transport API at the time the
1431 * to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
1527 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
1573 * This routine sets up the header fields of SLI4 specific mailbox command
1574 * for sending IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
1577 * for none embedded mailbox command).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
1613 /* Setup for the none-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated in the length of a
1625 * page even though the last SGE might not fill up to a
1626 * page, this is used as a priori size of PAGE_SIZE for
1627 * the later DMA memory free.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
1631 /* In case of malloc fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0)
1637 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1638 resid_len = length - alloc_len;
1639 if (resid_len > PAGE_SIZE) {
1640 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1641 PAGE_SIZE);
1642 alloc_len += PAGE_SIZE;
1643 } else {
1644 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1645 resid_len);
1646 alloc_len = length;
1647 }
1648 }
1649
1650 /* Set up main header fields in mailbox command */
1651 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1652 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1653
1654 /* Set up sub-header fields into the first page */
1655 if (pagen > 0) {
1656 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1657 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1658 cfg_shdr->request.request_length =
1659 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1660 }
1661 /* The sub-header is in DMA memory, which needs endian converstion */
1662 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1663 sizeof(union lpfc_sli4_cfg_shdr));
1664
1665 return alloc_len;
1666}
1667
1668/**
1669 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1670 * @phba: pointer to lpfc hba data structure.
1671 * @mbox: pointer to lpfc mbox command.
1672 *
1673 * This routine gets the opcode from a SLI4 specific mailbox command for
1674 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1675 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1676 * returned.
1677 **/
1678uint8_t
1679lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1680{
1681 struct lpfc_mbx_sli4_config *sli4_cfg;
1682 union lpfc_sli4_cfg_shdr *cfg_shdr;
1683
1684 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1685 return 0;
1686 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1687
1688 /* For embedded mbox command, get opcode from embedded sub-header*/
1689 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1690 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1691 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1692 }
1693
1694 /* For non-embedded mbox command, get opcode from first dma page */
1695 if (unlikely(!mbox->sge_array))
1696 return 0;
1697 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1698 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1699}
1700
1701/**
1702 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
1703 * @mboxq: pointer to lpfc mbox command.
1704 *
1705 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1706 * mailbox command.
1707 **/
1708void
1709lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1710{
1711 /* Set up SLI4 mailbox command header fields */
1712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1713 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1714
1715 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717
1718 /* Virtual fabrics and FIPs are not supported yet. */
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1720
1721 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg)
1723 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1724
1725 /* Enable NPIV only if configured to do so. */
1726 if (phba->max_vpi && phba->cfg_enable_npiv)
1727 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1728
1729 return;
1730}
1731
1732/**
1733 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1734 * @mbox: pointer to lpfc mbox command to initialize.
1735 * @vport: Vport associated with the VF.
1736 *
1737 * This routine initializes @mbox to all zeros and then fills in the mailbox
1738 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1739 * in the context of an FCF. The driver issues this command to setup a VFI
1740 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
1741 * REG_VFI after a successful VSAN login.
1742 **/
1743void
1744lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1745{
1746 struct lpfc_mbx_init_vfi *init_vfi;
1747
1748 memset(mbox, 0, sizeof(*mbox));
1749 init_vfi = &mbox->u.mqe.un.init_vfi;
1750 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1751 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1752 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1753 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1754 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1755}
1756
1757/**
1758 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1759 * @mbox: pointer to lpfc mbox command to initialize.
1760 * @vport: vport associated with the VF.
1761 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1762 *
1763 * This routine initializes @mbox to all zeros and then fills in the mailbox
1764 * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
1765 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1766 * fabrics identified by VFI in the context of an FCF.
1767 **/
1768void
1769lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1770{
1771 struct lpfc_mbx_reg_vfi *reg_vfi;
1772
1773 memset(mbox, 0, sizeof(*mbox));
1774 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1775 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1776 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1777 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1778 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1779 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1780 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1781 reg_vfi->bde.addrLow = putPaddrLow(phys);
1782 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1783 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1784 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1785}
1786
1787/**
1788 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1789 * @mbox: pointer to lpfc mbox command to initialize.
1790 * @vpi: VPI to be initialized.
1791 *
1792 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1793 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1794 * with the virtual N Port. The SLI Host issues this command before issuing a
1795 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1796 * successful virtual NPort login.
1797 **/
1798void
1799lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1800{
1801 memset(mbox, 0, sizeof(*mbox));
1802 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1803 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1804}
1805
1806/**
1807 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1808 * @mbox: pointer to lpfc mbox command to initialize.
1809 * @vfi: VFI to be unregistered.
1810 *
1811 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1812 * (logical NPort) into the inactive state. The SLI Host must have logged out
1813 * and unregistered all remote N_Ports to abort any activity on the virtual
1814 * fabric. The SLI Port posts the mailbox response after marking the virtual
1815 * fabric inactive.
1816 **/
1817void
1818lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1819{
1820 memset(mbox, 0, sizeof(*mbox));
1821 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1822 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1823}
1824
1825/**
1826 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
1827 * @phba: pointer to the hba structure containing.
1828 * @mbox: pointer to lpfc mbox command to initialize.
1829 *
1830 * This function create a SLI4 dump mailbox command to dump FCoE
1831 * parameters stored in region 23.
1832 **/
1833int
1834lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1835 struct lpfcMboxq *mbox)
1836{
1837 struct lpfc_dmabuf *mp = NULL;
1838 MAILBOX_t *mb;
1839
1840 memset(mbox, 0, sizeof(*mbox));
1841 mb = &mbox->u.mb;
1842
1843 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1844 if (mp)
1845 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1846
1847 if (!mp || !mp->virt) {
1848 kfree(mp);
1849 /* dump_fcoe_param failed to allocate memory */
1850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1851 "2569 lpfc_dump_fcoe_param: memory"
1852 " allocation failed \n");
1853 return 1;
1854 }
1855
1856 memset(mp->virt, 0, LPFC_BPL_SIZE);
1857 INIT_LIST_HEAD(&mp->list);
1858
1859 /* save address for completion */
1860 mbox->context1 = (uint8_t *) mp;
1861
1862 mb->mbxCommand = MBX_DUMP_MEMORY;
1863 mb->un.varDmp.type = DMP_NV_PARAMS;
1864 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1865 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1866 mb->un.varWords[3] = putPaddrLow(mp->phys);
1867 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1868 return 0;
1869}
1870
1871/**
1872 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1873 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1874 * @mbox: pointer to lpfc mbox command to initialize.
1875 *
1876 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1877 * SLI Host uses the command to activate an FCF after it has acquired FCF
1878 * information via a READ_FCF mailbox command. This mailbox command also is used
1879 * to indicate where received unsolicited frames from this FCF will be sent. By
1880 * default this routine will set up the FCF to forward all unsolicited frames
1881 * the the RQ ID passed in the @phba. This can be overridden by the caller for
1882 * more complicated setups.
1883 **/
1884void
1885lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1886{
1887 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1888
1889 memset(mbox, 0, sizeof(*mbox));
1890 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1891 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1892 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1893 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1894 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1895 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1896 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1897 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1898 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1899 (~phba->fcf.addr_mode) & 0x3);
1900 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1901 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1902 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1903 }
1904}
1905
1906/**
1907 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1908 * @mbox: pointer to lpfc mbox command to initialize.
1909 * @fcfi: FCFI to be unregistered.
1910 *
1911 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1912 * The SLI Host uses the command to inactivate an FCFI.
1913 **/
1914void
1915lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1916{
1917 memset(mbox, 0, sizeof(*mbox));
1918 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1919 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1920}
1921
1922/**
1923 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1924 * @mbox: pointer to lpfc mbox command to initialize.
1925 * @ndlp: The nodelist structure that describes the RPI to resume.
1926 *
1927 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1928 * link event.
1929 **/
1930void
1931lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1932{
1933 struct lpfc_mbx_resume_rpi *resume_rpi;
1934
1935 memset(mbox, 0, sizeof(*mbox));
1936 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1937 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1938 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1939 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1940 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1941 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1942 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1943}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a976733398..e198c917c13e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Free the memory allocated by lpfc_mem_alloc routine. This
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is a the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Free memory from PCI and driver memory pools and also those
213 * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along a non-DMA-mapped container for it. 366 * pool along a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
418 * pool along a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
456 * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41c..09f659f77bb3 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8032c5adb6a9..e9fa6762044a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 329
326 vports = lpfc_create_vport_work_array(phba); 330 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 331 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 332 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 333 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 334 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 335 new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 383
380 vports = lpfc_create_vport_work_array(phba); 384 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 385 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 387 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 388 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 389 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 431
428 vports = lpfc_create_vport_work_array(phba); 432 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 433 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 434 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 435 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 436 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 437 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 442}
439 443
440/** 444/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 445 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call being executed. 446 * @vport: The virtual port for which this call being executed.
447 * @num_to_allocate: The requested number of buffers to allocate.
443 * 448 *
444 * This routine allocates a scsi buffer, which contains all the necessary 449 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 450 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 451 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 452 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 453 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 454 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 455 *
451 * Return codes: 456 * Return codes:
452 * NULL - Error 457 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 458 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 459 **/
455static struct lpfc_scsi_buf * 460static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 461lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 462{
458 struct lpfc_hba *phba = vport->phba; 463 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 464 struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 468 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 469 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 470 uint16_t iotag;
471 int bcnt;
466 472
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 473 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 474 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 475 if (!psb)
476 break;
477
478 /*
479 * Get memory from the pci pool to map the virt space to pci
480 * bus space for an I/O. The DMA buffer includes space for the
481 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
482 * necessary to support the sg_tablesize.
483 */
484 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
485 GFP_KERNEL, &psb->dma_handle);
486 if (!psb->data) {
487 kfree(psb);
488 break;
489 }
490
491 /* Initialize virtual ptrs to dma_buf region. */
492 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
493
494 /* Allocate iotag for psb->cur_iocbq. */
495 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
496 if (iotag == 0) {
497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
498 psb->data, psb->dma_handle);
499 kfree(psb);
500 break;
501 }
502 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
503
504 psb->fcp_cmnd = psb->data;
505 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
506 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /* Initialize local short-hand pointers. */
510 bpl = psb->fcp_bpl;
511 pdma_phys_fcp_cmd = psb->dma_handle;
512 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
513 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
514 sizeof(struct fcp_rsp);
515
516 /*
517 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
518 * are sg list bdes. Initialize the first two and leave the
519 * rest for queuecommand.
520 */
521 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
522 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
523 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
524 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
526
527 /* Setup the physical region for the FCP RSP */
528 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
529 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
530 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
531 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
532 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
533
534 /*
535 * Since the IOCB for the FCP I/O is built into this
536 * lpfc_scsi_buf, initialize it with all known data now.
537 */
538 iocb = &psb->cur_iocbq.iocb;
539 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
540 if ((phba->sli_rev == 3) &&
541 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
542 /* fill in immediate fcp command BDE */
543 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
544 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
545 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
546 unsli3.fcp_ext.icd);
547 iocb->un.fcpi64.bdl.addrHigh = 0;
548 iocb->ulpBdeCount = 0;
549 iocb->ulpLe = 0;
 550 /* fill in response BDE */
551 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
552 BUFF_TYPE_BDE_64;
553 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
554 sizeof(struct fcp_rsp);
555 iocb->unsli3.fcp_ext.rbde.addrLow =
556 putPaddrLow(pdma_phys_fcp_rsp);
557 iocb->unsli3.fcp_ext.rbde.addrHigh =
558 putPaddrHigh(pdma_phys_fcp_rsp);
559 } else {
560 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
561 iocb->un.fcpi64.bdl.bdeSize =
562 (2 * sizeof(struct ulp_bde64));
563 iocb->un.fcpi64.bdl.addrLow =
564 putPaddrLow(pdma_phys_bpl);
565 iocb->un.fcpi64.bdl.addrHigh =
566 putPaddrHigh(pdma_phys_bpl);
567 iocb->ulpBdeCount = 1;
568 iocb->ulpLe = 1;
569 }
570 iocb->ulpClass = CLASS3;
571 psb->status = IOSTAT_SUCCESS;
572 /* Put it back into the SCSI buffer list */
573 lpfc_release_scsi_buf_s4(phba, psb);
470 574
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 575 }
483 576
484 /* Initialize virtual ptrs to dma_buf region. */ 577 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 578}
486 579
487 /* Allocate iotag for psb->cur_iocbq. */ 580/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 581 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 582 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 583 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 584 *
492 kfree (psb); 585 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 586 * FCP aborted xri.
587 **/
588void
589lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
590 struct sli4_wcqe_xri_aborted *axri)
591{
592 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
593 struct lpfc_scsi_buf *psb, *next_psb;
594 unsigned long iflag = 0;
595
596 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
597 list_for_each_entry_safe(psb, next_psb,
598 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
599 if (psb->cur_iocbq.sli4_xritag == xri) {
600 list_del(&psb->list);
601 psb->status = IOSTAT_SUCCESS;
602 spin_unlock_irqrestore(
603 &phba->sli4_hba.abts_scsi_buf_list_lock,
604 iflag);
605 lpfc_release_scsi_buf_s4(phba, psb);
606 return;
607 }
608 }
609 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
610 iflag);
611}
612
613/**
 614 * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
615 * @phba: pointer to lpfc hba data structure.
616 *
617 * This routine walks the list of scsi buffers that have been allocated and
618 * repost them to the HBA by using SGL block post. This is needed after a
619 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
620 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
621 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
622 *
623 * Returns: 0 = success, non-zero failure.
624 **/
625int
626lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
627{
628 struct lpfc_scsi_buf *psb;
629 int index, status, bcnt = 0, rcnt = 0, rc = 0;
630 LIST_HEAD(sblist);
631
632 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
633 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
634 if (psb) {
635 /* Remove from SCSI buffer list */
636 list_del(&psb->list);
637 /* Add it to a local SCSI buffer list */
638 list_add_tail(&psb->list, &sblist);
639 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
640 bcnt = rcnt;
641 rcnt = 0;
642 }
643 } else
644 /* A hole present in the XRI array, need to skip */
645 bcnt = rcnt;
646
647 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
648 /* End of XRI array for SCSI buffer, complete */
649 bcnt = rcnt;
650
651 /* Continue until collect up to a nembed page worth of sgls */
652 if (bcnt == 0)
653 continue;
654 /* Now, post the SCSI buffer list sgls as a block */
655 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
656 /* Reset SCSI buffer count for next round of posting */
657 bcnt = 0;
658 while (!list_empty(&sblist)) {
659 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
660 list);
661 if (status) {
662 /* Put this back on the abort scsi list */
663 psb->status = IOSTAT_LOCAL_REJECT;
664 psb->result = IOERR_ABORT_REQUESTED;
665 rc++;
666 } else
667 psb->status = IOSTAT_SUCCESS;
668 /* Put it back into the SCSI buffer list */
669 lpfc_release_scsi_buf_s4(phba, psb);
670 }
494 } 671 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 672 return rc;
673}
496 674
497 psb->fcp_cmnd = psb->data; 675/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 676 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 677 * @vport: The virtual port for which this call being executed.
500 sizeof(struct fcp_rsp); 678 * @num_to_allocate: The requested number of buffers to allocate.
679 *
680 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
681 * the scsi buffer contains all the necessary information needed to initiate
682 * a SCSI I/O.
683 *
684 * Return codes:
685 * int - number of scsi buffers that were allocated.
686 * 0 = failure, less than num_to_alloc is a partial failure.
687 **/
688static int
689lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
690{
691 struct lpfc_hba *phba = vport->phba;
692 struct lpfc_scsi_buf *psb;
693 struct sli4_sge *sgl;
694 IOCB_t *iocb;
695 dma_addr_t pdma_phys_fcp_cmd;
696 dma_addr_t pdma_phys_fcp_rsp;
697 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
698 uint16_t iotag, last_xritag = NO_XRI;
699 int status = 0, index;
700 int bcnt;
701 int non_sequential_xri = 0;
702 int rc = 0;
703 LIST_HEAD(sblist);
704
705 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
706 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
707 if (!psb)
708 break;
501 709
502 /* Initialize local short-hand pointers. */ 710 /*
503 bpl = psb->fcp_bpl; 711 * Get memory from the pci pool to map the virt space to pci bus
504 pdma_phys_fcp_cmd = psb->dma_handle; 712 * space for an I/O. The DMA buffer includes space for the
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 713 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 714 * necessary to support the sg_tablesize.
507 sizeof(struct fcp_rsp); 715 */
716 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
717 GFP_KERNEL, &psb->dma_handle);
718 if (!psb->data) {
719 kfree(psb);
720 break;
721 }
508 722
509 /* 723 /* Initialize virtual ptrs to dma_buf region. */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 724 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526 725
527 /* 726 /* Allocate iotag for psb->cur_iocbq. */
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 727 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
529 * initialize it with all known data now. 728 if (iotag == 0) {
530 */ 729 kfree(psb);
531 iocb = &psb->cur_iocbq.iocb; 730 break;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 731 }
533 if ((phba->sli_rev == 3) && 732
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 733 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
535 /* fill in immediate fcp command BDE */ 734 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 735 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
736 psb->data, psb->dma_handle);
737 kfree(psb);
738 break;
739 }
740 if (last_xritag != NO_XRI
741 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
742 non_sequential_xri = 1;
743 } else
744 list_add_tail(&psb->list, &sblist);
745 last_xritag = psb->cur_iocbq.sli4_xritag;
746
747 index = phba->sli4_hba.scsi_xri_cnt++;
748 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
749
750 psb->fcp_bpl = psb->data;
751 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
752 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
753 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
754 sizeof(struct fcp_cmnd));
755
756 /* Initialize local short-hand pointers. */
757 sgl = (struct sli4_sge *)psb->fcp_bpl;
758 pdma_phys_bpl = psb->dma_handle;
759 pdma_phys_fcp_cmd =
760 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
761 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
762 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
763
764 /*
765 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
766 * are sg list bdes. Initialize the first two and leave the
767 * rest for queuecommand.
768 */
769 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
770 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
771 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
772 bf_set(lpfc_sli4_sge_last, sgl, 0);
773 sgl->word2 = cpu_to_le32(sgl->word2);
774 sgl->word3 = cpu_to_le32(sgl->word3);
775 sgl++;
776
777 /* Setup the physical region for the FCP RSP */
778 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
779 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
780 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
781 bf_set(lpfc_sli4_sge_last, sgl, 1);
782 sgl->word2 = cpu_to_le32(sgl->word2);
783 sgl->word3 = cpu_to_le32(sgl->word3);
784
785 /*
786 * Since the IOCB for the FCP I/O is built into this
787 * lpfc_scsi_buf, initialize it with all known data now.
788 */
789 iocb = &psb->cur_iocbq.iocb;
790 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
791 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
792 /* setting the BLP size to 2 * sizeof BDE may not be correct.
793 * We are setting the bpl to point to out sgl. An sgl's
794 * entries are 16 bytes, a bpl entries are 12 bytes.
795 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 796 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 797 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 798 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in responce BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 799 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 800 iocb->ulpLe = 1;
801 iocb->ulpClass = CLASS3;
802 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
803 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
804 else
805 pdma_phys_bpl1 = 0;
806 psb->dma_phys_bpl = pdma_phys_bpl;
807 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
808 if (non_sequential_xri) {
809 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
810 pdma_phys_bpl1,
811 psb->cur_iocbq.sli4_xritag);
812 if (status) {
813 /* Put this back on the abort scsi list */
814 psb->status = IOSTAT_LOCAL_REJECT;
815 psb->result = IOERR_ABORT_REQUESTED;
816 rc++;
817 } else
818 psb->status = IOSTAT_SUCCESS;
819 /* Put it back into the SCSI buffer list */
820 lpfc_release_scsi_buf_s4(phba, psb);
821 break;
822 }
823 }
824 if (bcnt) {
825 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
826 /* Reset SCSI buffer count for next round of posting */
827 while (!list_empty(&sblist)) {
828 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
829 list);
830 if (status) {
831 /* Put this back on the abort scsi list */
832 psb->status = IOSTAT_LOCAL_REJECT;
833 psb->result = IOERR_ABORT_REQUESTED;
834 rc++;
835 } else
836 psb->status = IOSTAT_SUCCESS;
837 /* Put it back into the SCSI buffer list */
838 lpfc_release_scsi_buf_s4(phba, psb);
839 }
558 } 840 }
559 iocb->ulpClass = CLASS3;
560 841
561 return psb; 842 return bcnt + non_sequential_xri - rc;
562} 843}
563 844
564/** 845/**
 565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 846 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
566 * @phba: The Hba for which this call is being executed. 847 * @vport: The virtual port for which this call being executed.
848 * @num_to_allocate: The requested number of buffers to allocate.
849 *
850 * This routine wraps the actual SCSI buffer allocator function pointer from
851 * the lpfc_hba struct.
852 *
853 * Return codes:
854 * int - number of scsi buffers that were allocated.
855 * 0 = failure, less than num_to_alloc is a partial failure.
856 **/
857static inline int
858lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
859{
860 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
861}
862
863/**
864 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
865 * @phba: The HBA for which this call is being executed.
567 * 866 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 867 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 868 * and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 890}
592 891
593/** 892/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 893 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 894 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 895 * @psb: The scsi buffer which is being released.
597 * 896 *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 898 * lpfc_scsi_buf_list list.
600 **/ 899 **/
601static void 900static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 901lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 902{
604 unsigned long iflag = 0; 903 unsigned long iflag = 0;
605 904
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 909}
611 910
612/** 911/**
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 912 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
913 * @phba: The Hba for which this call is being executed.
914 * @psb: The scsi buffer which is being released.
915 *
916 * This routine releases @psb scsi buffer by adding it to tail of @phba
917 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
918 * and cannot be reused for at least RA_TOV amount of time if it was
919 * aborted.
920 **/
921static void
922lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923{
924 unsigned long iflag = 0;
925
926 if (psb->status == IOSTAT_LOCAL_REJECT
927 && psb->result == IOERR_ABORT_REQUESTED) {
928 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
929 iflag);
930 psb->pCmd = NULL;
931 list_add_tail(&psb->list,
932 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
933 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
934 iflag);
935 } else {
936
937 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
938 psb->pCmd = NULL;
939 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
940 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
941 }
942}
943
944/**
945 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
946 * @phba: The Hba for which this call is being executed.
947 * @psb: The scsi buffer which is being released.
948 *
949 * This routine releases @psb scsi buffer by adding it to tail of @phba
950 * lpfc_scsi_buf_list list.
951 **/
952static void
953lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
954{
955
956 phba->lpfc_release_scsi_buf(phba, psb);
957}
958
959/**
960 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 961 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 962 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 963 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 964 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and format the 965 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
619 * bdea. This routine also initializes all IOCB fields which are dependent on 966 * through sg elements and format the bdea. This routine also initializes all
620 * scsi command request buffer. 967 * IOCB fields which are dependent on scsi command request buffer.
621 * 968 *
622 * Return codes: 969 * Return codes:
623 * 1 - Error 970 * 1 - Error
624 * 0 - Success 971 * 0 - Success
625 **/ 972 **/
626static int 973static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 974lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 975{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 976 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 977 struct scatterlist *sgel = NULL;
@@ -1412,6 +1759,133 @@ out:
1412} 1759}
1413 1760
1414/** 1761/**
1762 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1763 * @phba: The Hba for which this call is being executed.
1764 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1765 *
1766 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1767 * field of @lpfc_cmd for device with SLI-4 interface spec.
1768 *
1769 * Return codes:
1770 * 1 - Error
1771 * 0 - Success
1772 **/
1773static int
1774lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1775{
1776 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1777 struct scatterlist *sgel = NULL;
1778 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1779 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1780 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1781 dma_addr_t physaddr;
1782 uint32_t num_bde = 0;
1783 uint32_t dma_len;
1784 uint32_t dma_offset = 0;
1785 int nseg;
1786
1787 /*
1788 * There are three possibilities here - use scatter-gather segment, use
1789 * the single mapping, or neither. Start the lpfc command prep by
1790 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1791 * data bde entry.
1792 */
1793 if (scsi_sg_count(scsi_cmnd)) {
1794 /*
1795 * The driver stores the segment count returned from pci_map_sg
1796 * because this a count of dma-mappings used to map the use_sg
1797 * pages. They are not guaranteed to be the same for those
1798 * architectures that implement an IOMMU.
1799 */
1800
1801 nseg = scsi_dma_map(scsi_cmnd);
1802 if (unlikely(!nseg))
1803 return 1;
1804 sgl += 1;
1805 /* clear the last flag in the fcp_rsp map entry */
1806 sgl->word2 = le32_to_cpu(sgl->word2);
1807 bf_set(lpfc_sli4_sge_last, sgl, 0);
1808 sgl->word2 = cpu_to_le32(sgl->word2);
1809 sgl += 1;
1810
1811 lpfc_cmd->seg_cnt = nseg;
1812 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1813 printk(KERN_ERR "%s: Too many sg segments from "
1814 "dma_map_sg. Config %d, seg_cnt %d\n",
1815 __func__, phba->cfg_sg_seg_cnt,
1816 lpfc_cmd->seg_cnt);
1817 scsi_dma_unmap(scsi_cmnd);
1818 return 1;
1819 }
1820
1821 /*
1822 * The driver established a maximum scatter-gather segment count
1823 * during probe that limits the number of sg elements in any
1824 * single scsi command. Just run through the seg_cnt and format
1825 * the sge's.
1826 * When using SLI-3 the driver will try to fit all the BDEs into
1827 * the IOCB. If it can't then the BDEs get added to a BPL as it
1828 * does for SLI-2 mode.
1829 */
1830 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1831 physaddr = sg_dma_address(sgel);
1832 dma_len = sg_dma_len(sgel);
1833 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1834 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1835 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1836 if ((num_bde + 1) == nseg)
1837 bf_set(lpfc_sli4_sge_last, sgl, 1);
1838 else
1839 bf_set(lpfc_sli4_sge_last, sgl, 0);
1840 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1841 sgl->word2 = cpu_to_le32(sgl->word2);
1842 sgl->word3 = cpu_to_le32(sgl->word3);
1843 dma_offset += dma_len;
1844 sgl++;
1845 }
1846 } else {
1847 sgl += 1;
1848 /* clear the last flag in the fcp_rsp map entry */
1849 sgl->word2 = le32_to_cpu(sgl->word2);
1850 bf_set(lpfc_sli4_sge_last, sgl, 1);
1851 sgl->word2 = cpu_to_le32(sgl->word2);
1852 }
1853
1854 /*
1855 * Finish initializing those IOCB fields that are dependent on the
1856 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1857 * explicitly reinitialized.
1858 * all iocb memory resources are reused.
1859 */
1860 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1861
1862 /*
1863 * Due to difference in data length between DIF/non-DIF paths,
1864 * we need to set word 4 of IOCB here
1865 */
1866 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1867 return 0;
1868}
1869
1870/**
1871 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1872 * @phba: The Hba for which this call is being executed.
1873 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1874 *
1875 * This routine wraps the actual DMA mapping function pointer from the
1876 * lpfc_hba struct.
1877 *
1878 * Return codes:
1879 * 1 - Error
1880 * 0 - Success
1881 **/
1882static inline int
1883lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1884{
1885 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1886}
1887
1888/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1889 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1890 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1891 * @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1978}
1505 1979
1506/** 1980/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
1508 * @phba: The Hba for which this call is being executed. 1982 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 1983 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 1984 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 1985 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 1986 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 1987 **/
1514static void 1988static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 1990{
1517 /* 1991 /*
1518 * There are only two special cases to consider. (1) the scsi command 1992 * There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1529} 2003}
1530 2004
1531/** 2005/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
2026 * This routine does DMA un-mapping of scatter gather list of scsi command
2027 * field of @lpfc_cmd for device with SLI-4 interface spec.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
1532 * lpfc_handler_fcp_err - FCP response handler 2036 * lpfc_handler_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed. 2037 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2180 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2181 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2182 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2183 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2184 *
1681 * This routine assigns scsi command result by looking into response IOCB 2185 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2186 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2461}
1958 2462
1959/** 2463/**
1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit 2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev
1961 * @vport: The virtual port for which this call is being executed. 2465 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2466 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2467 * @pnode: Pointer to lpfc_nodelist.
1964 * 2468 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2469 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2470 * to transfer for device with SLI3 interface spec.
1967 **/ 2471 **/
1968static void 2472static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode) 2474 struct lpfc_nodelist *pnode)
1971{ 2475{
1972 struct lpfc_hba *phba = vport->phba; 2476 struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2517 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2518 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2519 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2520 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2521 iocb_cmd->un.fcpi.fcpi_parm = 0;
2522 iocb_cmd->ulpPU = 0;
2523 } else
2524 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2525 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2526 phba->fc4OutputRequests++;
2020 } else { 2527 } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2558}
2052 2559
2053/** 2560/**
2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit 2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2601 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2603 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2604 * @task_mgmt_cmd: SCSI task management command.
2059 * 2605 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2606 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2607 * for device with SLI-3 interface spec.
2061 * 2608 *
2062 * Return codes: 2609 * Return codes:
2063 * 0 - Error 2610 * 0 - Error
2064 * 1 - Success 2611 * 1 - Success
2065 **/ 2612 **/
2066static int 2613static int
2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd, 2615 struct lpfc_scsi_buf *lpfc_cmd,
2069 unsigned int lun, 2616 unsigned int lun,
2070 uint8_t task_mgmt_cmd) 2617 uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2114} 2661}
2115 2662
2116/** 2663/**
2664 * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693
2694/**
2695 * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2717 task_mgmt_cmd);
2718}
2719
2720/**
2721 * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table
2722 * @phba: The hba struct for which this call is being executed.
2723 * @dev_grp: The HBA PCI-Device group number.
2724 *
2725 * This routine sets up the SCSI interface API function jump table in @phba
2726 * struct.
2727 * Returns: 0 - success, -ENODEV - failure.
2728 **/
2729int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{
2732
2733 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break;
2743 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break;
2752 default:
2753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2754 "1418 Invalid HBA PCI-device group: 0x%x\n",
2755 dev_grp);
2756 return -ENODEV;
2757 break;
2758 }
2759 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2760 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2761 return 0;
2762}
2763
2764/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2765 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2766 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2767 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba, 2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2182 &phba->sli.ring[phba->sli.fcp_ring], 2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) { 2831 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) { 2832 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2952 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2953 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2954 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2955 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2956 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2957 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3073 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 3074
2429 atomic_inc(&ndlp->cmd_pending); 3075 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 3076 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3077 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 3078 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 3079 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 3136 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3137 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 3138 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 3139 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 3140 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 3141 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3176 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3177 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3178 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3179 if (phba->sli_rev == LPFC_SLI_REV4)
3180 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3181 else
3182 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3183
2536 icmd->ulpLe = 1; 3184 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3185 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3190
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3191 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3192 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3193 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3194 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3195 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3196 ret = FAILED;
2548 goto out; 3197 goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2668 "0703 Issue target reset to TGT %d LUN %d " 3317 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba, 3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3321 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3322 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3473{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3474 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3475 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3476 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3477 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3478 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3479 int num_allocated = 0;
2833 3480
2834 if (!rport || fc_remote_port_chkready(rport)) 3481 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3482 return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3510 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3511 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3512 }
2866 3513 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3514 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3516 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3517 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3518 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3519 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3520 }
2881 return 0; 3521 return 0;
2882} 3522}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa29..65dfc8bd5b49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..ff04daf18f48 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
70 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
71 * @q: The Work Queue to operate on.
72 * @wqe: The work Queue Entry to put on the Work queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
136
137/**
138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
139 * @q: The Mailbox Queue to operate on.
140 * @wqe: The Mailbox Queue Entry to put on the Work queue.
141 *
142 * This routine will copy the contents of @mqe to the next available entry on
143 * the @q. This function will then ring the Work Queue Doorbell to signal the
144 * HBA to start processing the Work Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
196 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed, but not popped back to the HBA then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
223 * @arm: Indicates whether the host wants to arms this CQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
227 * by clearing the valid bit for each completion queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
272 * processed, but not popped back to the HBA then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
294 * @arm: Indicates whether the host wants to arms this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
336 * @q: The Header Receive Queue to operate on.
337 * @wqe: The Receive Queue Entry to put on the Receive queue.
338 *
339 * This routine will copy the contents of @wqe to the next available entry on
340 * the @q. This function will then ring the Receive Queue Doorbell to signal the
341 * HBA to start processing the Receive Queue Entry. This function returns the
342 * index that the rqe was copied to if successful. If no entries are available
343 * on @q then this function will return -ENOMEM.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @q: The Header Receive Queue to operate on.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
458 * This function clears the sglq pointer from the array of acive
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
463 * Returns sglq ponter = success, NULL = Failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
483 * This function returns the sglq pointer from the array of acive
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
488 * Returns sglq ponter = success, NULL = Failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
506 * This function is called with hbalock held. This function
507 * Gets a new driver sglq object from the sglq list. If the
508 * list is not empty then it is successful, it returns pointer to the newly
509 * allocated sglq object else it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
553 * The sqlq structure that holds the xritag and phys and virtual
554 * mappings for the scatter gather list is retrieved from the
555 * active array of sglq. The get of the sglq pointer also clears
556 * the entry in the array. If the status of the IO indiactes that
557 * this IO was aborted then the sglq entry it put on the
558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
559 * IO has good status or fails for any other reason then the sglq
560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
635
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
781 /* Return all HBQ buffer that are in-fly */ 1259 /* Return all HBQ buffer that are in-fly */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully post the buffer else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1233 /* Unknow mailbox command compl */ 1797 /* Unknow mailbox command compl */
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1320 1924
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
2715 /* First add the response iocb to the countinueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
2719 /* Now, determine whetehr the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function will
2878 * remove each response iocb in the response ring and calls the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function will remove each
3013 * response iocb from the response worker queue and calls the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 3122 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to check if HBA is
3176 * ready. This function will wait in a loop for the HBA to be ready
3177 * If the HBA is not ready , the function will will reset the HBA PCI
3178 * function again. The function returns 1 when HBA fail to be ready
3179 * otherwise returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
3207 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
3211 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
3212 * from the API jump table function pointer from the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. This function disables PCI layer parity
3460 * checking during resets the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function returns 0 always.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3622 * API jump table function pointer from the lpfc_hba struct.
3623**/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function will return zero if successful
3844 * else it will return negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
4089 * This function issue a dump mailbox command to read config region
4090 * 23 and parse the records in the region and populate driver
4091 * data structure.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
4143 return -EIO;
4144
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4147 kfree(mp);
4148 return 0;
4149}
4150
4151/**
4152 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4153 * @phba: pointer to lpfc hba data structure.
4154 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4155 * @vpd: pointer to the memory to hold resulting port vpd data.
4156 * @vpd_size: On input, the number of bytes allocated to @vpd.
4157 * On output, the number of data bytes in @vpd.
4158 *
4159 * This routine executes a READ_REV SLI4 mailbox command. In
4160 * addition, this routine gets the port vpd data.
4161 *
4162 * Return codes
4163 * 0 - sucessful
4164 * ENOMEM - could not allocated memory.
4165 **/
4166static int
4167lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4168 uint8_t *vpd, uint32_t *vpd_size)
4169{
4170 int rc = 0;
4171 uint32_t dma_size;
4172 struct lpfc_dmabuf *dmabuf;
4173 struct lpfc_mqe *mqe;
4174
4175 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4176 if (!dmabuf)
4177 return -ENOMEM;
4178
4179 /*
4180 * Get a DMA buffer for the vpd data resulting from the READ_REV
4181 * mailbox command.
4182 */
4183 dma_size = *vpd_size;
4184 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4185 dma_size,
4186 &dmabuf->phys,
4187 GFP_KERNEL);
4188 if (!dmabuf->virt) {
4189 kfree(dmabuf);
4190 return -ENOMEM;
4191 }
4192 memset(dmabuf->virt, 0, dma_size);
4193
4194 /*
4195 * The SLI4 implementation of READ_REV conflicts at word1,
4196 * bits 31:16 and SLI4 adds vpd functionality not present
4197 * in SLI3. This code corrects the conflicts.
4198 */
4199 lpfc_read_rev(phba, mboxq);
4200 mqe = &mboxq->u.mqe;
4201 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4202 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4203 mqe->un.read_rev.word1 &= 0x0000FFFF;
4204 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4205 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4206
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc) {
4209 dma_free_coherent(&phba->pcidev->dev, dma_size,
4210 dmabuf->virt, dmabuf->phys);
4211 return -EIO;
4212 }
4213
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /*
4236 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than
4238 * case and update the caller's size.
4239 */
4240 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4241 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4242
4243 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4244 dma_free_coherent(&phba->pcidev->dev, dma_size,
4245 dmabuf->virt, dmabuf->phys);
4246 kfree(dmabuf);
4247 return 0;
4248}
4249
4250/**
4251 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4252 * @phba: pointer to lpfc hba data structure.
4253 *
4254 * This routine is called to explicitly arm the SLI4 device's completion and
4255 * event queues
4256 **/
4257static void
4258lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4259{
4260 uint8_t fcp_eqidx;
4261
4262 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4263 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4264 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4265 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4266 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4267 LPFC_QUEUE_REARM);
4268 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4270 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4271 LPFC_QUEUE_REARM);
4272}
4273
4274/**
4275 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
4276 * @phba: Pointer to HBA context object.
4277 *
4278 * This function is the main SLI4 device intialization PCI function. This
4279 * function is called by the HBA intialization code, HBA reset code and
4280 * HBA error attention handler code. Caller is not required to hold any
4281 * locks.
4282 **/
4283int
4284lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4285{
4286 int rc;
4287 LPFC_MBOXQ_t *mboxq;
4288 struct lpfc_mqe *mqe;
4289 uint8_t *vpd;
4290 uint32_t vpd_size;
4291 uint32_t ftr_rsp = 0;
4292 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4293 struct lpfc_vport *vport = phba->pport;
4294 struct lpfc_dmabuf *mp;
4295
4296 /* Perform a PCI function reset to start from clean */
4297 rc = lpfc_pci_function_reset(phba);
4298 if (unlikely(rc))
4299 return -ENODEV;
4300
4301 /* Check the HBA Host Status Register for readyness */
4302 rc = lpfc_sli4_post_status_check(phba);
4303 if (unlikely(rc))
4304 return -ENODEV;
4305 else {
4306 spin_lock_irq(&phba->hbalock);
4307 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4308 spin_unlock_irq(&phba->hbalock);
4309 }
4310
4311 /*
4312 * Allocate a single mailbox container for initializing the
4313 * port.
4314 */
4315 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4316 if (!mboxq)
4317 return -ENOMEM;
4318
4319 /*
4320 * Continue initialization with default values even if driver failed
4321 * to read FCoE param config regions
4322 */
4323 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4325 "2570 Failed to read FCoE parameters \n");
4326
4327 /* Issue READ_REV to collect vpd and FW information. */
4328 vpd_size = PAGE_SIZE;
4329 vpd = kzalloc(vpd_size, GFP_KERNEL);
4330 if (!vpd) {
4331 rc = -ENOMEM;
4332 goto out_free_mbox;
4333 }
4334
4335 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4336 if (unlikely(rc))
4337 goto out_free_vpd;
4338
4339 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO;
4349 goto out_free_vpd;
4350 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /*
4356 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure
4358 * is not fatal as the driver will use generic values.
4359 */
4360 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4361 if (unlikely(!rc)) {
4362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4363 "0377 Error %d parsing vpd. "
4364 "Using defaults.\n", rc);
4365 rc = 0;
4366 }
4367
4368 /* By now, we should determine the SLI revision, hard code for now */
4369 phba->sli_rev = LPFC_SLI_REV4;
4370
4371 /*
4372 * Discover the port's supported feature set and match it against the
4373 * hosts requests.
4374 */
4375 lpfc_request_features(phba, mboxq);
4376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4377 if (unlikely(rc)) {
4378 rc = -EIO;
4379 goto out_free_vpd;
4380 }
4381
4382 /*
4383 * The port must support FCP initiator mode as this is the
4384 * only mode running in the host.
4385 */
4386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4388 "0378 No support for fcpi mode.\n");
4389 ftr_rsp++;
4390 }
4391
4392 /*
4393 * If the port cannot support the host's requested features
4394 * then turn off the global config parameters to disable the
4395 * feature in the driver. This is not a fatal error.
4396 */
4397 if ((phba->cfg_enable_bg) &&
4398 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4399 ftr_rsp++;
4400
4401 if (phba->max_vpi && phba->cfg_enable_npiv &&
4402 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4403 ftr_rsp++;
4404
4405 if (ftr_rsp) {
4406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4407 "0379 Feature Mismatch Data: x%08x %08x "
4408 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4409 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4410 phba->cfg_enable_npiv, phba->max_vpi);
4411 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4412 phba->cfg_enable_bg = 0;
4413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4414 phba->cfg_enable_npiv = 0;
4415 }
4416
4417 /* These SLI3 features are assumed in SLI4 */
4418 spin_lock_irq(&phba->hbalock);
4419 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4420 spin_unlock_irq(&phba->hbalock);
4421
4422 /* Read the port's service parameters. */
4423 lpfc_read_sparam(phba, mboxq, vport->vpi);
4424 mboxq->vport = vport;
4425 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4426 mp = (struct lpfc_dmabuf *) mboxq->context1;
4427 if (rc == MBX_SUCCESS) {
4428 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4429 rc = 0;
4430 }
4431
4432 /*
4433 * This memory was allocated by the lpfc_read_sparam routine. Release
4434 * it to the mbuf pool.
4435 */
4436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4437 kfree(mp);
4438 mboxq->context1 = NULL;
4439 if (unlikely(rc)) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4441 "0382 READ_SPARAM command failed "
4442 "status %d, mbxStatus x%x\n",
4443 rc, bf_get(lpfc_mqe_status, mqe));
4444 phba->link_state = LPFC_HBA_ERROR;
4445 rc = -EIO;
4446 goto out_free_vpd;
4447 }
4448
4449 if (phba->cfg_soft_wwnn)
4450 u64_to_wwn(phba->cfg_soft_wwnn,
4451 vport->fc_sparam.nodeName.u.wwn);
4452 if (phba->cfg_soft_wwpn)
4453 u64_to_wwn(phba->cfg_soft_wwpn,
4454 vport->fc_sparam.portName.u.wwn);
4455 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4456 sizeof(struct lpfc_name));
4457 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4458 sizeof(struct lpfc_name));
4459
4460 /* Update the fc_host data structures with new wwn. */
4461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4463
4464 /* Register SGL pool to the device using non-embedded mailbox command */
4465 rc = lpfc_sli4_post_sgl_list(phba);
4466 if (unlikely(rc)) {
4467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4468 "0582 Error %d during sgl post operation", rc);
4469 rc = -ENODEV;
4470 goto out_free_vpd;
4471 }
4472
4473 /* Register SCSI SGL pool to the device */
4474 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4475 if (unlikely(rc)) {
4476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4477 "0383 Error %d during scsi sgl post opeation",
4478 rc);
4479 /* Some Scsi buffers were moved to the abort scsi list */
4480 /* A pci function reset will repost them */
4481 rc = -ENODEV;
4482 goto out_free_vpd;
4483 }
4484
4485 /* Post the rpi header region to the device. */
4486 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4487 if (unlikely(rc)) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4489 "0393 Error %d during rpi post operation\n",
4490 rc);
4491 rc = -ENODEV;
4492 goto out_free_vpd;
4493 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496
4497 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba);
4499 if (unlikely(rc)) {
4500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4501 "0381 Error %d during queue setup.\n ", rc);
4502 goto out_stop_timers;
4503 }
4504
4505 /* Arm the CQs and then EQs on device */
4506 lpfc_sli4_arm_cqeq_intr(phba);
4507
4508 /* Indicate device interrupt mode */
4509 phba->sli4_hba.intr_enable = 1;
4510
4511 /* Allow asynchronous mailbox command to go through */
4512 spin_lock_irq(&phba->hbalock);
4513 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4514 spin_unlock_irq(&phba->hbalock);
4515
4516 /* Post receive buffers to the device */
4517 lpfc_sli4_rb_setup(phba);
4518
4519 /* Start the ELS watchdog timer */
4520 /*
4521 * The driver for SLI4 is not yet ready to process timeouts
4522 * or interrupts. Once it is, the comment bars can be removed.
4523 */
4524 /* mod_timer(&vport->els_tmofunc,
4525 * jiffies + HZ * (phba->fc_ratov*2)); */
4526
4527 /* Start heart beat timer */
4528 mod_timer(&phba->hb_tmofunc,
4529 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4530 phba->hb_outstanding = 0;
4531 phba->last_completion_time = jiffies;
4532
4533 /* Start error attention (ERATT) polling timer */
4534 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4535
4536 /*
4537 * The port is ready, set the host's link state to LINK_DOWN
4538 * in preparation for link interrupts.
4539 */
4540 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4541 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4542 lpfc_set_loopback_flag(phba);
4543 /* Change driver state to LPFC_LINK_DOWN right before init link */
4544 spin_lock_irq(&phba->hbalock);
4545 phba->link_state = LPFC_LINK_DOWN;
4546 spin_unlock_irq(&phba->hbalock);
4547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4548 if (unlikely(rc != MBX_NOT_FINISHED)) {
4549 kfree(vpd);
4550 return 0;
4551 } else
4552 rc = -EIO;
4553
4554 /* Unset all the queues set up in this routine when error out */
4555 if (rc)
4556 lpfc_sli4_queue_unset(phba);
4557
4558out_stop_timers:
4559 if (rc)
4560 lpfc_stop_hba_timers(phba);
4561out_free_vpd:
4562 kfree(vpd);
4563out_free_mbox:
4564 mempool_free(mboxq, phba->mbox_mem_pool);
4565 return rc;
4566}
3203 4567
3204/** 4568/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4569 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4608lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4609{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4610 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4611 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4612 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4613 struct lpfc_sli_ring *pring;
3250 4614
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4645 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4646 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4647 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4648 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4649 spin_unlock_irq(&phba->hbalock);
3286 4650
3287 pring = &psli->ring[psli->fcp_ring]; 4651 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4653
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4655 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4656
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4657 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4658 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4659}
3308 4660
3309/** 4661/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4662 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4663 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4664 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4665 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4666 *
3315 * This function is called by discovery code and HBA management code 4667 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 4668 * to submit a mailbox command to firmware with SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 4669 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4670 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4671 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4672 * mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4684 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4685 * the function.
3334 **/ 4686 **/
3335int 4687static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4688lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4689 uint32_t flag)
3337{ 4690{
3338 MAILBOX_t *mb; 4691 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4692 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4702 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4703 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4704 /* processing mbox queue from intr_handler */
4705 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4706 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4707 return MBX_SUCCESS;
4708 }
3352 processing_queue = 1; 4709 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4710 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4711 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4722 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4723 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4724 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4725 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4726 dump_stack();
3370 goto out_not_finished; 4727 goto out_not_finished;
3371 } 4728 }
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4742
3386 psli = &phba->sli; 4743 psli = &phba->sli;
3387 4744
3388 mb = &pmbox->mb; 4745 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4746 status = MBX_SUCCESS;
3390 4747
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4748 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4750
3394 /* Mbox command <mbxCommand> cannot issue */ 4751 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4752 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4753 "(%d):0311 Mailbox command x%x cannot "
4754 "issue Data: x%x x%x\n",
4755 pmbox->vport ? pmbox->vport->vpi : 0,
4756 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4757 goto out_not_finished;
3397 } 4758 }
3398 4759
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4760 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4761 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4762 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4763 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4764 "(%d):2528 Mailbox command x%x cannot "
4765 "issue Data: x%x x%x\n",
4766 pmbox->vport ? pmbox->vport->vpi : 0,
4767 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4768 goto out_not_finished;
3404 } 4769 }
3405 4770
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4778 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4779
3415 /* Mbox command <mbxCommand> cannot issue */ 4780 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4782 "(%d):2529 Mailbox command x%x "
4783 "cannot issue Data: x%x x%x\n",
4784 pmbox->vport ? pmbox->vport->vpi : 0,
4785 pmbox->u.mb.mbxCommand,
4786 psli->sli_flag, flag);
3417 goto out_not_finished; 4787 goto out_not_finished;
3418 } 4788 }
3419 4789
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4790 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4792 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4793 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4794 "(%d):2530 Mailbox command x%x "
4795 "cannot issue Data: x%x x%x\n",
4796 pmbox->vport ? pmbox->vport->vpi : 0,
4797 pmbox->u.mb.mbxCommand,
4798 psli->sli_flag, flag);
3424 goto out_not_finished; 4799 goto out_not_finished;
3425 } 4800 }
3426 4801
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4837
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4838 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4839 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4840 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4841 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4842 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4843 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4844 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4846 "(%d):2531 Mailbox command x%x "
4847 "cannot issue Data: x%x x%x\n",
4848 pmbox->vport ? pmbox->vport->vpi : 0,
4849 pmbox->u.mb.mbxCommand,
4850 psli->sli_flag, flag);
3471 goto out_not_finished; 4851 goto out_not_finished;
3472 } 4852 }
3473 /* timeout active mbox command */ 4853 /* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4886 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4887 mb->mbxOwner = OWN_CHIP;
3508 4888
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4889 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4890 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4891 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4892 } else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4909
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4910 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4911 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4912 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4913 }
3534 } 4914 }
3535 4915
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4932 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4933 readl(phba->CAregaddr); /* flush */
3554 4934
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4935 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4936 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4937 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4938 word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4971 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4972 }
3593 4973
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4974 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4975 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4976 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4977 word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4984 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4985 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4986 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4987 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4988 word0 = slimword0;
3609 } 4989 }
3610 } 4990 }
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 4996 ha_copy = readl(phba->HAregaddr);
3617 } 4997 }
3618 4998
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4999 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5000 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5001 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5002 } else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5023
3644out_not_finished: 5024out_not_finished:
3645 if (processing_queue) { 5025 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5026 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5027 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5028 }
3649 return MBX_NOT_FINISHED; 5029 return MBX_NOT_FINISHED;
3650} 5030}
3651 5031
3652/** 5032/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object.
5036 *
5037 * The function posts a mailbox to the port. The mailbox is expected
5038 * to be comletely filled in and ready for the port to operate on it.
5039 * This routine executes a synchronous completion operation on the
5040 * mailbox by polling for its completion.
5041 *
5042 * The caller must not be holding any locks when calling this routine.
5043 *
5044 * Returns:
5045 * MBX_SUCCESS - mailbox posted successfully
5046 * Any of the MBX error values.
5047 **/
5048static int
5049lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5050{
5051 int rc = MBX_SUCCESS;
5052 unsigned long iflag;
5053 uint32_t db_ready;
5054 uint32_t mcqe_status;
5055 uint32_t mbx_cmnd;
5056 unsigned long timeout;
5057 struct lpfc_sli *psli = &phba->sli;
5058 struct lpfc_mqe *mb = &mboxq->u.mqe;
5059 struct lpfc_bmbx_create *mbox_rgn;
5060 struct dma_address *dma_address;
5061 struct lpfc_register bmbx_reg;
5062
5063 /*
5064 * Only one mailbox can be active to the bootstrap mailbox region
5065 * at a time and there is no queueing provided.
5066 */
5067 spin_lock_irqsave(&phba->hbalock, iflag);
5068 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5069 spin_unlock_irqrestore(&phba->hbalock, iflag);
5070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5071 "(%d):2532 Mailbox command x%x (x%x) "
5072 "cannot issue Data: x%x x%x\n",
5073 mboxq->vport ? mboxq->vport->vpi : 0,
5074 mboxq->u.mb.mbxCommand,
5075 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5076 psli->sli_flag, MBX_POLL);
5077 return MBXERR_ERROR;
5078 }
5079 /* The server grabs the token and owns it until release */
5080 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5081 phba->sli.mbox_active = mboxq;
5082 spin_unlock_irqrestore(&phba->hbalock, iflag);
5083
5084 /*
5085 * Initialize the bootstrap memory region to avoid stale data areas
5086 * in the mailbox post. Then copy the caller's mailbox contents to
5087 * the bmbx mailbox region.
5088 */
5089 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5090 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5091 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5092 sizeof(struct lpfc_mqe));
5093
5094 /* Post the high mailbox dma address to the port and wait for ready. */
5095 dma_address = &phba->sli4_hba.bmbx.dma_address;
5096 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5097
5098 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5099 * 1000) + jiffies;
5100 do {
5101 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5102 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5103 if (!db_ready)
5104 msleep(2);
5105
5106 if (time_after(jiffies, timeout)) {
5107 rc = MBXERR_ERROR;
5108 goto exit;
5109 }
5110 } while (!db_ready);
5111
5112 /* Post the low mailbox dma address to the port. */
5113 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5114 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5115 * 1000) + jiffies;
5116 do {
5117 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5118 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5119 if (!db_ready)
5120 msleep(2);
5121
5122 if (time_after(jiffies, timeout)) {
5123 rc = MBXERR_ERROR;
5124 goto exit;
5125 }
5126 } while (!db_ready);
5127
5128 /*
5129 * Read the CQ to ensure the mailbox has completed.
5130 * If so, update the mailbox status so that the upper layers
5131 * can complete the request normally.
5132 */
5133 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5134 sizeof(struct lpfc_mqe));
5135 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5136 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5137 sizeof(struct lpfc_mcqe));
5138 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5139
5140 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5141 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5142 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5143 rc = MBXERR_ERROR;
5144 }
5145
5146 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5147 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5148 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5149 " x%x x%x CQ: x%x x%x x%x x%x\n",
5150 mboxq->vport ? mboxq->vport->vpi : 0,
5151 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5152 bf_get(lpfc_mqe_status, mb),
5153 mb->un.mb_words[0], mb->un.mb_words[1],
5154 mb->un.mb_words[2], mb->un.mb_words[3],
5155 mb->un.mb_words[4], mb->un.mb_words[5],
5156 mb->un.mb_words[6], mb->un.mb_words[7],
5157 mb->un.mb_words[8], mb->un.mb_words[9],
5158 mb->un.mb_words[10], mb->un.mb_words[11],
5159 mb->un.mb_words[12], mboxq->mcqe.word0,
5160 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5161 mboxq->mcqe.trailer);
5162exit:
5163 /* We are holding the token, no needed for lock when release */
5164 spin_lock_irqsave(&phba->hbalock, iflag);
5165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5166 phba->sli.mbox_active = NULL;
5167 spin_unlock_irqrestore(&phba->hbalock, iflag);
5168 return rc;
5169}
5170
5171/**
5172 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5173 * @phba: Pointer to HBA context object.
5174 * @pmbox: Pointer to mailbox object.
5175 * @flag: Flag indicating how the mailbox need to be processed.
5176 *
5177 * This function is called by discovery code and HBA management code to submit
5178 * a mailbox command to firmware with SLI-4 interface spec.
5179 *
5180 * Return codes the caller owns the mailbox command after the return of the
5181 * function.
5182 **/
5183static int
5184lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5185 uint32_t flag)
5186{
5187 struct lpfc_sli *psli = &phba->sli;
5188 unsigned long iflags;
5189 int rc;
5190
5191 /* Detect polling mode and jump to a handler */
5192 if (!phba->sli4_hba.intr_enable) {
5193 if (flag == MBX_POLL)
5194 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5195 else
5196 rc = -EIO;
5197 if (rc != MBX_SUCCESS)
5198 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5199 "(%d):2541 Mailbox command x%x "
5200 "(x%x) cannot issue Data: x%x x%x\n",
5201 mboxq->vport ? mboxq->vport->vpi : 0,
5202 mboxq->u.mb.mbxCommand,
5203 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5204 psli->sli_flag, flag);
5205 return rc;
5206 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) "
5209 "cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return -EIO;
5215 }
5216
5217 /* Now, interrupt mode asynchrous mailbox command */
5218 rc = lpfc_mbox_cmd_check(phba, mboxq);
5219 if (rc) {
5220 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5221 "(%d):2543 Mailbox command x%x (x%x) "
5222 "cannot issue Data: x%x x%x\n",
5223 mboxq->vport ? mboxq->vport->vpi : 0,
5224 mboxq->u.mb.mbxCommand,
5225 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5226 psli->sli_flag, flag);
5227 goto out_not_finished;
5228 }
5229 rc = lpfc_mbox_dev_check(phba);
5230 if (unlikely(rc)) {
5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5232 "(%d):2544 Mailbox command x%x (x%x) "
5233 "cannot issue Data: x%x x%x\n",
5234 mboxq->vport ? mboxq->vport->vpi : 0,
5235 mboxq->u.mb.mbxCommand,
5236 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5237 psli->sli_flag, flag);
5238 goto out_not_finished;
5239 }
5240
5241 /* Put the mailbox command to the driver internal FIFO */
5242 psli->slistat.mbox_busy++;
5243 spin_lock_irqsave(&phba->hbalock, iflags);
5244 lpfc_mbox_put(phba, mboxq);
5245 spin_unlock_irqrestore(&phba->hbalock, iflags);
5246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5247 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5248 "x%x (x%x) x%x x%x x%x\n",
5249 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5250 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5251 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5252 phba->pport->port_state,
5253 psli->sli_flag, MBX_NOWAIT);
5254 /* Wake up worker thread to transport mailbox command from head */
5255 lpfc_worker_wake_up(phba);
5256
5257 return MBX_BUSY;
5258
5259out_not_finished:
5260 return MBX_NOT_FINISHED;
5261}
5262
5263/**
5264 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5265 * @phba: Pointer to HBA context object.
5266 *
5267 * This function is called by worker thread to send a mailbox command to
5268 * SLI4 HBA firmware.
5269 *
5270 **/
5271int
5272lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5273{
5274 struct lpfc_sli *psli = &phba->sli;
5275 LPFC_MBOXQ_t *mboxq;
5276 int rc = MBX_SUCCESS;
5277 unsigned long iflags;
5278 struct lpfc_mqe *mqe;
5279 uint32_t mbx_cmnd;
5280
5281 /* Check interrupt mode before post async mailbox command */
5282 if (unlikely(!phba->sli4_hba.intr_enable))
5283 return MBX_NOT_FINISHED;
5284
5285 /* Check for mailbox command service token */
5286 spin_lock_irqsave(&phba->hbalock, iflags);
5287 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5288 spin_unlock_irqrestore(&phba->hbalock, iflags);
5289 return MBX_NOT_FINISHED;
5290 }
5291 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5292 spin_unlock_irqrestore(&phba->hbalock, iflags);
5293 return MBX_NOT_FINISHED;
5294 }
5295 if (unlikely(phba->sli.mbox_active)) {
5296 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5298 "0384 There is pending active mailbox cmd\n");
5299 return MBX_NOT_FINISHED;
5300 }
5301 /* Take the mailbox command service token */
5302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5303
5304 /* Get the next mailbox command from head of queue */
5305 mboxq = lpfc_mbox_get(phba);
5306
5307 /* If no more mailbox command waiting for post, we're done */
5308 if (!mboxq) {
5309 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5310 spin_unlock_irqrestore(&phba->hbalock, iflags);
5311 return MBX_SUCCESS;
5312 }
5313 phba->sli.mbox_active = mboxq;
5314 spin_unlock_irqrestore(&phba->hbalock, iflags);
5315
5316 /* Check device readiness for posting mailbox command */
5317 rc = lpfc_mbox_dev_check(phba);
5318 if (unlikely(rc))
5319 /* Driver clean routine will clean up pending mailbox */
5320 goto out_not_finished;
5321
5322 /* Prepare the mbox command to be posted */
5323 mqe = &mboxq->u.mqe;
5324 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5325
5326 /* Start timer for the mbox_tmo and log some mailbox post messages */
5327 mod_timer(&psli->mbox_tmo, (jiffies +
5328 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5329
5330 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5331 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5332 "x%x x%x\n",
5333 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5334 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5335 phba->pport->port_state, psli->sli_flag);
5336
5337 if (mbx_cmnd != MBX_HEARTBEAT) {
5338 if (mboxq->vport) {
5339 lpfc_debugfs_disc_trc(mboxq->vport,
5340 LPFC_DISC_TRC_MBOX_VPORT,
5341 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5342 mbx_cmnd, mqe->un.mb_words[0],
5343 mqe->un.mb_words[1]);
5344 } else {
5345 lpfc_debugfs_disc_trc(phba->pport,
5346 LPFC_DISC_TRC_MBOX,
5347 "MBOX Send: cmd:x%x mb:x%x x%x",
5348 mbx_cmnd, mqe->un.mb_words[0],
5349 mqe->un.mb_words[1]);
5350 }
5351 }
5352 psli->slistat.mbox_cmd++;
5353
5354 /* Post the mailbox command to the port */
5355 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5356 if (rc != MBX_SUCCESS) {
5357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5358 "(%d):2533 Mailbox command x%x (x%x) "
5359 "cannot issue Data: x%x x%x\n",
5360 mboxq->vport ? mboxq->vport->vpi : 0,
5361 mboxq->u.mb.mbxCommand,
5362 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5363 psli->sli_flag, MBX_NOWAIT);
5364 goto out_not_finished;
5365 }
5366
5367 return rc;
5368
5369out_not_finished:
5370 spin_lock_irqsave(&phba->hbalock, iflags);
5371 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5372 __lpfc_mbox_cmpl_put(phba, mboxq);
5373 /* Release the token */
5374 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5375 phba->sli.mbox_active = NULL;
5376 spin_unlock_irqrestore(&phba->hbalock, iflags);
5377
5378 return MBX_NOT_FINISHED;
5379}
5380
5381/**
5382 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5383 * @phba: Pointer to HBA context object.
5384 * @pmbox: Pointer to mailbox object.
5385 * @flag: Flag indicating how the mailbox need to be processed.
5386 *
5387 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
5388 * the API jump table function pointer from the lpfc_hba struct.
5389 *
5390 * Return codes the caller owns the mailbox command after the return of the
5391 * function.
5392 **/
5393int
5394lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5395{
5396 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5397}
5398
5399/**
5400 * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table
5401 * @phba: The hba struct for which this call is being executed.
5402 * @dev_grp: The HBA PCI-Device group number.
5403 *
5404 * This routine sets up the mbox interface API function jump table in @phba
5405 * struct.
5406 * Returns: 0 - success, -ENODEV - failure.
5407 **/
5408int
5409lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5410{
5411
5412 switch (dev_grp) {
5413 case LPFC_PCI_DEV_LP:
5414 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5415 phba->lpfc_sli_handle_slow_ring_event =
5416 lpfc_sli_handle_slow_ring_event_s3;
5417 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5418 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5419 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5420 break;
5421 case LPFC_PCI_DEV_OC:
5422 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5423 phba->lpfc_sli_handle_slow_ring_event =
5424 lpfc_sli_handle_slow_ring_event_s4;
5425 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5426 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5427 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5428 break;
5429 default:
5430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431 "1420 Invalid HBA PCI-device group: 0x%x\n",
5432 dev_grp);
5433 return -ENODEV;
5434 break;
5435 }
5436 return 0;
5437}
5438
5439/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5440 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5441 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5442 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5488}
3702 5489
3703/** 5490/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5491 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5492 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5493 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5494 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5495 * @flag: Flag indicating if this command can be put into txq.
3709 * 5496 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5497 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5498 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5499 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5500 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5501 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5502 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5503 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5504 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5505 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5506 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5507 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5508 * This function is called with hbalock held. The function will return success
3722 * 5509 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5510 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5511 **/
3727static int 5512static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5513__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5514 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5515{
3731 struct lpfc_iocbq *nextiocb; 5516 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5517 IOCB_t *iocb;
5518 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5519
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5520 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5521 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5619 return IOCB_BUSY;
3834} 5620}
3835 5621
5622/**
5623 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5624 * @phba: Pointer to HBA context object.
5625 * @piocb: Pointer to command iocb.
5626 * @sglq: Pointer to the scatter gather queue object.
5627 *
5628 * This routine converts the bpl or bde that is in the IOCB
5629 * to a sgl list for the sli4 hardware. The physical address
5630 * of the bpl/bde is converted back to a virtual address.
5631 * If the IOCB contains a BPL then the list of BDE's is
5632 * converted to sli4_sge's. If the IOCB contains a single
5633 * BDE then it is converted to a single sli_sge.
5634 * The IOCB is still in cpu endianess so the contents of
5635 * the bpl can be used without byte swapping.
5636 *
5637 * Returns valid XRI = Success, NO_XRI = Failure.
5638**/
5639static uint16_t
5640lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5641 struct lpfc_sglq *sglq)
5642{
5643 uint16_t xritag = NO_XRI;
5644 struct ulp_bde64 *bpl = NULL;
5645 struct ulp_bde64 bde;
5646 struct sli4_sge *sgl = NULL;
5647 IOCB_t *icmd;
5648 int numBdes = 0;
5649 int i = 0;
5650
5651 if (!piocbq || !sglq)
5652 return xritag;
5653
5654 sgl = (struct sli4_sge *)sglq->sgl;
5655 icmd = &piocbq->iocb;
5656 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5657 numBdes = icmd->un.genreq64.bdl.bdeSize /
5658 sizeof(struct ulp_bde64);
5659 /* The addrHigh and addrLow fields within the IOCB
5660 * have not been byteswapped yet so there is no
5661 * need to swap them back.
5662 */
5663 bpl = (struct ulp_bde64 *)
5664 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5665
5666 if (!bpl)
5667 return xritag;
5668
5669 for (i = 0; i < numBdes; i++) {
5670 /* Should already be byte swapped. */
5671 sgl->addr_hi = bpl->addrHigh;
5672 sgl->addr_lo = bpl->addrLow;
5673 /* swap the size field back to the cpu so we
5674 * can assign it to the sgl.
5675 */
5676 bde.tus.w = le32_to_cpu(bpl->tus.w);
5677 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5678 if ((i+1) == numBdes)
5679 bf_set(lpfc_sli4_sge_last, sgl, 1);
5680 else
5681 bf_set(lpfc_sli4_sge_last, sgl, 0);
5682 sgl->word2 = cpu_to_le32(sgl->word2);
5683 sgl->word3 = cpu_to_le32(sgl->word3);
5684 bpl++;
5685 sgl++;
5686 }
5687 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5688 /* The addrHigh and addrLow fields of the BDE have not
5689 * been byteswapped yet so they need to be swapped
5690 * before putting them in the sgl.
5691 */
5692 sgl->addr_hi =
5693 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5694 sgl->addr_lo =
5695 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5696 bf_set(lpfc_sli4_sge_len, sgl,
5697 icmd->un.genreq64.bdl.bdeSize);
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 sgl->word2 = cpu_to_le32(sgl->word2);
5700 sgl->word3 = cpu_to_le32(sgl->word3);
5701 }
5702 return sglq->sli4_xritag;
5703}
5704
5705/**
5706 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5707 * @phba: Pointer to HBA context object.
5708 * @piocb: Pointer to command iocb.
5709 *
5710 * This routine performs a round robin SCSI command to SLI4 FCP WQ index
5711 * distribution.
5712 *
5713 * Return: index into SLI4 fast-path FCP queue index.
5714 **/
5715static uint32_t
5716lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5717{
5718 static uint32_t fcp_qidx;
5719
5720 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5721}
5722
5723/**
5724 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5725 * @phba: Pointer to HBA context object.
5726 * @piocb: Pointer to command iocb.
5727 * @wqe: Pointer to the work queue entry.
5728 *
5729 * This routine converts the iocb command to its Work Queue Entry
5730 * equivalent. The wqe pointer should not have any fields set when
5731 * this routine is called because it will memcpy over them.
5732 * This routine does not set the CQ_ID or the WQEC bits in the
5733 * wqe.
5734 *
5735 * Returns: 0 = Success, IOCB_ERROR = Failure.
5736 **/
5737static int
5738lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5739 union lpfc_wqe *wqe)
5740{
5741 uint32_t payload_len = 0;
5742 uint8_t ct = 0;
5743 uint32_t fip;
5744 uint32_t abort_tag;
5745 uint8_t command_type = ELS_COMMAND_NON_FIP;
5746 uint8_t cmnd;
5747 uint16_t xritag;
5748 struct ulp_bde64 *bpl = NULL;
5749
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND;
5758 else {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5760 "2019 Invalid cmd 0x%x\n",
5761 iocbq->iocb.ulpCommand);
5762 return IOCB_ERROR;
5763 }
5764 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag;
5767 xritag = iocbq->sli4_xritag;
5768 wqe->words[7] = 0; /* The ct field has moved so reset */
5769 /* words0-2 bpl convert bde */
5770 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5771 bpl = (struct ulp_bde64 *)
5772 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5773 if (!bpl)
5774 return IOCB_ERROR;
5775
5776 /* Should already be byte swapped. */
5777 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5778 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5779 /* swap the size field back to the cpu so we
5780 * can assign it to the sgl.
5781 */
5782 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5783 payload_len = wqe->generic.bde.tus.f.bdeSize;
5784 } else
5785 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5786
5787 iocbq->iocb.ulpIoTag = iocbq->iotag;
5788 cmnd = iocbq->iocb.ulpCommand;
5789
5790 switch (iocbq->iocb.ulpCommand) {
5791 case CMD_ELS_REQUEST64_CR:
5792 if (!iocbq->iocb.ulpLe) {
5793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5794 "2007 Only Limited Edition cmd Format"
5795 " supported 0x%x\n",
5796 iocbq->iocb.ulpCommand);
5797 return IOCB_ERROR;
5798 }
5799 wqe->els_req.payload_len = payload_len;
5800 /* Els_reguest64 has a TMO */
5801 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5802 iocbq->iocb.ulpTimeout);
5803 /* Need a VF for word 4 set the vf bit*/
5804 bf_set(els_req64_vf, &wqe->els_req, 0);
5805 /* And a VFID for word 12 */
5806 bf_set(els_req64_vfid, &wqe->els_req, 0);
5807 /*
5808 * Set ct field to 3, indicates that the context_tag field
5809 * contains the FCFI and remote N_Port_ID is
5810 * in word 5.
5811 */
5812
5813 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext);
5816
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5825 break;
5826 case CMD_XMIT_SEQUENCE64_CR:
5827 /* word3 iocb=io_tag32 wqe=payload_offset */
5828 /* payload offset used for multilpe outstanding
5829 * sequences on the same exchange
5830 */
5831 wqe->words[3] = 0;
5832 /* word4 relative_offset memcpy */
5833 /* word5 r_ctl/df_ctl memcpy */
5834 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5835 wqe->xmit_sequence.xmit_len = payload_len;
5836 break;
5837 case CMD_XMIT_BCAST64_CN:
5838 /* word3 iocb=iotag32 wqe=payload_len */
5839 wqe->words[3] = 0; /* no definition for this in wqe */
5840 /* word4 iocb=rsvd wqe=rsvd */
5841 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5842 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5844 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5845 break;
5846 case CMD_FCP_IWRITE64_CR:
5847 command_type = FCP_COMMAND_DATA_OUT;
5848 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5849 * confusing.
5850 * word3 is payload_len: byte offset to the sgl entry for the
5851 * fcp_command.
5852 * word4 is total xfer len, same as the IOCB->ulpParameter.
5853 * word5 is initial xfer len 0 = wait for xfer-ready
5854 */
5855
5856 /* Always wait for xfer-ready before sending data */
5857 wqe->fcp_iwrite.initial_xfer_len = 0;
5858 /* word 4 (xfer length) should have been set on the memcpy */
5859
5860 /* allow write to fall through to read */
5861 case CMD_FCP_IREAD64_CR:
5862 /* FCP_CMD is always the 1st sgl entry */
5863 wqe->fcp_iread.payload_len =
5864 payload_len + sizeof(struct fcp_rsp);
5865
5866 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5869 iocbq->iocb.ulpFCP2Rcvy);
5870 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5871 /* The XC bit and the XS bit are similar. The driver never
5872 * tracked whether or not the exchange was previouslly open.
5873 * XC = Exchange create, 0 is create. 1 is already open.
5874 * XS = link cmd: 1 do not close the exchange after command.
5875 * XS = 0 close exchange when command completes.
5876 * The only time we would not set the XC bit is when the XS bit
5877 * is set and we are sending our 2nd or greater command on
5878 * this exchange.
5879 */
5880
5881 /* ALLOW read & write to fall through to ICMD64 */
5882 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5888 break;
5889 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the
5891 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5892 * sgl[0] = cmnd
5893 * sgl[1] = rsp.
5894 *
5895 */
5896 wqe->gen_req.command_len = payload_len;
5897 /* Word4 parameter copied in the memcpy */
5898 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5899 /* word6 context tag copied in memcpy */
5900 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5901 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5903 "2015 Invalid CT %x command 0x%x\n",
5904 ct, iocbq->iocb.ulpCommand);
5905 return IOCB_ERROR;
5906 }
5907 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5908 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910
5911 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5912 command_type = OTHER_COMMAND;
5913 break;
5914 case CMD_XMIT_ELS_RSP64_CX:
5915 /* words0-2 BDE memcpy */
5916 /* word3 iocb=iotag32 wqe=rsvd */
5917 wqe->words[3] = 0;
5918 /* word4 iocb=did wge=rsvd. */
5919 wqe->words[4] = 0;
5920 /* word5 iocb=rsvd wge=did */
5921 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5922 iocbq->iocb.un.elsreq64.remoteID);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5926
5927 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5928 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5929 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5930 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5931 iocbq->vport->vpi + phba->vpi_base);
5932 command_type = OTHER_COMMAND;
5933 break;
5934 case CMD_CLOSE_XRI_CN:
5935 case CMD_ABORT_XRI_CN:
5936 case CMD_ABORT_XRI_CX:
 5937 /* words 0-2 memcpy should be 0 reserved */
5938 /* port will send abts */
5939 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5940 /*
5941 * The link is down so the fw does not need to send abts
5942 * on the wire.
5943 */
5944 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5945 else
5946 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5947 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5948 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5949 wqe->words[5] = 0;
5950 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5951 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5952 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5953 wqe->generic.abort_tag = abort_tag;
5954 /*
5955 * The abort handler will send us CMD_ABORT_XRI_CN or
5956 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5957 */
5958 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5959 cmnd = CMD_ABORT_XRI_CX;
5960 command_type = OTHER_COMMAND;
5961 xritag = 0;
5962 break;
5963 case CMD_XRI_ABORTED_CX:
5964 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5965 /* words0-2 are all 0's no bde */
5966 /* word3 and word4 are rsvrd */
5967 wqe->words[3] = 0;
5968 wqe->words[4] = 0;
5969 /* word5 iocb=rsvd wge=did */
5970 /* There is no remote port id in the IOCB? */
5971 /* Let this fall through and fail */
5972 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
5973 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
5974 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
5975 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
5976 default:
5977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5978 "2014 Invalid command 0x%x\n",
5979 iocbq->iocb.ulpCommand);
5980 return IOCB_ERROR;
5981 break;
5982
5983 }
5984 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
5985 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
5986 wqe->generic.abort_tag = abort_tag;
5987 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
5988 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
5989 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
5990 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
5991
5992 return 0;
5993}
5994
5995/**
5996 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
5997 * @phba: Pointer to HBA context object.
5998 * @ring_number: SLI ring number to issue iocb on.
5999 * @piocb: Pointer to command iocb.
6000 * @flag: Flag indicating if this command can be put into txq.
6001 *
6002 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6003 * an iocb command to an HBA with SLI-4 interface spec.
6004 *
6005 * This function is called with hbalock held. The function will return success
6006 * after it successfully submit the iocb to firmware or after adding to the
6007 * txq.
6008 **/
6009static int
6010__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6011 struct lpfc_iocbq *piocb, uint32_t flag)
6012{
6013 struct lpfc_sglq *sglq;
6014 uint16_t xritag;
6015 union lpfc_wqe wqe;
6016 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6017 uint32_t fcp_wqidx;
6018
6019 if (piocb->sli4_xritag == NO_XRI) {
6020 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6021 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6022 sglq = NULL;
6023 else {
6024 sglq = __lpfc_sli_get_sglq(phba);
6025 if (!sglq)
6026 return IOCB_ERROR;
6027 piocb->sli4_xritag = sglq->sli4_xritag;
6028 }
6029 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6030 sglq = NULL; /* These IO's already have an XRI and
6031 * a mapped sgl.
6032 */
6033 } else {
6034 /* This is a continuation of a commandi,(CX) so this
6035 * sglq is on the active list
6036 */
6037 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6038 if (!sglq)
6039 return IOCB_ERROR;
6040 }
6041
6042 if (sglq) {
6043 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6044 if (xritag != sglq->sli4_xritag)
6045 return IOCB_ERROR;
6046 }
6047
6048 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6049 return IOCB_ERROR;
6050
6051 if (piocb->iocb_flag & LPFC_IO_FCP) {
6052 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6053 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6054 return IOCB_ERROR;
6055 } else {
6056 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6057 return IOCB_ERROR;
6058 }
6059 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6060
6061 return 0;
6062}
6063
6064/**
6065 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6066 *
6067 * This routine wraps the actual lockless version for issusing IOCB function
6068 * pointer from the lpfc_hba struct.
6069 *
6070 * Return codes:
6071 * IOCB_ERROR - Error
6072 * IOCB_SUCCESS - Success
6073 * IOCB_BUSY - Busy
6074 **/
6075static inline int
6076__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6077 struct lpfc_iocbq *piocb, uint32_t flag)
6078{
6079 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6080}
6081
6082/**
6083 * lpfc_sli_api_table_setup - Set up sli api fucntion jump table
6084 * @phba: The hba struct for which this call is being executed.
6085 * @dev_grp: The HBA PCI-Device group number.
6086 *
6087 * This routine sets up the SLI interface API function jump table in @phba
6088 * struct.
6089 * Returns: 0 - success, -ENODEV - failure.
6090 **/
6091int
6092lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6093{
6094
6095 switch (dev_grp) {
6096 case LPFC_PCI_DEV_LP:
6097 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6098 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6099 break;
6100 case LPFC_PCI_DEV_OC:
6101 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6102 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106 "1419 Invalid HBA PCI-device group: 0x%x\n",
6107 dev_grp);
6108 return -ENODEV;
6109 break;
6110 }
6111 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6112 return 0;
6113}
3836 6114
3837/** 6115/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6116 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6126 * functions which do not hold hbalock.
3849 **/ 6127 **/
3850int 6128int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6129lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6130 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6131{
3854 unsigned long iflags; 6132 unsigned long iflags;
3855 int rc; 6133 int rc;
3856 6134
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6135 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6136 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6137 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6138
3861 return rc; 6139 return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6426}
4149 6427
4150/** 6428/**
6429 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6430 * @phba: Pointer to HBA context object.
6431 *
6432 * This routine flushes the mailbox command subsystem. It will unconditionally
6433 * flush all the mailbox commands in the three possible stages in the mailbox
6434 * command sub-system: pending mailbox command queue; the outstanding mailbox
6435 * command; and completed mailbox command queue. It is caller's responsibility
6436 * to make sure that the driver is in the proper state to flush the mailbox
6437 * command sub-system. Namely, the posting of mailbox commands into the
6438 * pending mailbox command queue from the various clients must be stopped;
6439 * either the HBA is in a state that it will never works on the outstanding
6440 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6441 * mailbox command has been completed.
6442 **/
6443static void
6444lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6445{
6446 LIST_HEAD(completions);
6447 struct lpfc_sli *psli = &phba->sli;
6448 LPFC_MBOXQ_t *pmb;
6449 unsigned long iflag;
6450
6451 /* Flush all the mailbox commands in the mbox system */
6452 spin_lock_irqsave(&phba->hbalock, iflag);
6453 /* The pending mailbox command queue */
6454 list_splice_init(&phba->sli.mboxq, &completions);
6455 /* The outstanding active mailbox command */
6456 if (psli->mbox_active) {
6457 list_add_tail(&psli->mbox_active->list, &completions);
6458 psli->mbox_active = NULL;
6459 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6460 }
6461 /* The completed mailbox command queue */
6462 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6463 spin_unlock_irqrestore(&phba->hbalock, iflag);
6464
6465 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6466 while (!list_empty(&completions)) {
6467 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6468 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6469 if (pmb->mbox_cmpl)
6470 pmb->mbox_cmpl(phba, pmb);
6471 }
6472}
6473
6474/**
4151 * lpfc_sli_host_down - Vport cleanup function 6475 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6476 * @vport: Pointer to virtual port object.
4153 * 6477 *
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6564 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6565 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6566 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6567 unsigned long flags = 0;
6568 int i;
6569
6570 /* Shutdown the mailbox command sub-system */
6571 lpfc_sli_mbox_sys_shutdown(phba);
4246 6572
4247 lpfc_hba_down_prep(phba); 6573 lpfc_hba_down_prep(phba);
4248 6574
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6613
4288 /* Return any active mbox cmds */ 6614 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6615 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6616
4292 spin_lock(&phba->pport->work_port_lock); 6617 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6618 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6619 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6620
4296 /* Return any pending or completed mbox cmds */ 6621 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6622}
4298 if (psli->mbox_active) { 6623
4299 list_add_tail(&psli->mbox_active->list, &completions); 6624/**
4300 psli->mbox_active = NULL; 6625 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6626 * @phba: Pointer to HBA context object.
4302 } 6627 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6628 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6629 * shutting down the SLI4 HBA FCoE function. This function is called with no
6630 * lock held and always returns 1.
6631 *
6632 * This function does the following to cleanup driver FCoE function resources:
6633 * - Free discovery resources for each virtual port
6634 * - Cleanup any pending fabric iocbs
6635 * - Iterate through the iocb txq and free each entry in the list.
6636 * - Free up any buffer posted to the HBA.
6637 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6638 * - Free mailbox commands in the mailbox queue.
6639 **/
6640int
6641lpfc_sli4_hba_down(struct lpfc_hba *phba)
6642{
6643 /* Stop the SLI4 device port */
6644 lpfc_stop_port(phba);
6645
6646 /* Tear down the queues in the HBA */
6647 lpfc_sli4_queue_unset(phba);
6648
6649 /* unregister default FCFI from the HBA */
6650 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6651
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6652 return 1;
4313} 6653}
4314 6654
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 6979 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 6980 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 6981 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 6982 if (phba->sli_rev == LPFC_SLI_REV4)
6983 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6984 else
6985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 6986 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 6987 iabt->ulpClass = icmd->ulpClass;
4645 6988
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 6998 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 6999 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7000 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7001 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7002
4660 if (retval) 7003 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7004 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7181 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7182 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7183 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7184 if (phba->sli_rev == LPFC_SLI_REV4)
7185 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7186 else
7187 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7188 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7189 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7190 abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7196
4851 /* Setup callback routine and issue the command. */ 7197 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7198 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7199 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7200 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7201 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7202 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7203 errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7278 **/
4932int 7279int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7280lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7281 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7282 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7283 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7284 uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7309 readl(phba->HCregaddr); /* flush */
4963 } 7310 }
4964 7311
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7312 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7313 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7314 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7315 timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7424}
5078 7425
5079/** 7426/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7427 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7428 * @phba: Pointer to HBA context.
5082 * 7429 *
5083 * This function is called to cleanup any pending mailbox 7430 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7431 * It first marks the mailbox sub-system is in a block state to prevent
5085 * This function is called while resetting the HBA. 7432 * the asynchronous mailbox command from issued off the pending mailbox
5086 * The function is called without any lock held. The function 7433 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7434 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7435 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7436 * mailbox sub-system. Otherwise, if it is due to normal condition (such
7437 * as with offline or HBA function reset), this routine will wait for the
7438 * outstanding mailbox command to complete before invoking the mailbox
7439 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7440 **/
5091int 7441void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7442lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7443{
5094 struct lpfc_vport *vport = phba->pport; 7444 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7445 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7446 unsigned long timeout;
5097 7447
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7448 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7449 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7450 spin_unlock_irq(&phba->hbalock);
5101 7451
5102 /* 7452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7453 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7454 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7455 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7456 spin_unlock_irq(&phba->hbalock);
7457 /* Determine how long we might wait for the active mailbox
7458 * command to be gracefully completed by firmware.
7459 */
7460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7461 1000) + jiffies;
7462 while (phba->sli.mbox_active) {
7463 /* Check active mailbox complete status every 2ms */
7464 msleep(2);
7465 if (time_after(jiffies, timeout))
7466 /* Timeout, let the mailbox flush routine to
7467 * forcefully release active mailbox command
7468 */
7469 break;
7470 }
7471 }
7472 lpfc_sli_mbox_sys_flush(phba);
7473}
7474
7475/**
7476 * lpfc_sli_eratt_read - read sli-3 error attention events
7477 * @phba: Pointer to HBA context.
7478 *
7479 * This function is called to read the SLI3 device error attention registers
7480 * for possible error attention events. The caller must hold the hostlock
7481 * with spin_lock_irq().
7482 *
 7483 * This function returns 1 when there is Error Attention in the Host Attention
7484 * Register and returns 0 otherwise.
7485 **/
7486static int
7487lpfc_sli_eratt_read(struct lpfc_hba *phba)
7488{
7489 uint32_t ha_copy;
5111 7490
5112 if (ha_copy & HA_MBATT) 7491 /* Read chip Host Attention (HA) register */
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7492 ha_copy = readl(phba->HAregaddr);
5114 i = 0; 7493 if (ha_copy & HA_ERATT) {
7494 /* Read host status register to retrieve error event */
7495 lpfc_sli_read_hs(phba);
7496
7497 /* Check if there is a deferred error condition is active */
7498 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr);
7507 }
5115 7508
5116 msleep(1); 7509 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1;
5117 } 7516 }
7517 return 0;
7518}
7519
7520/**
7521 * lpfc_sli4_eratt_read - read sli-4 error attention events
7522 * @phba: Pointer to HBA context.
7523 *
7524 * This function is called to read the SLI4 device error attention registers
7525 * for possible error attention events. The caller must hold the hostlock
7526 * with spin_lock_irq().
7527 *
 7528 * This function returns 1 when there is Error Attention in the Host Attention
7529 * Register and returns 0 otherwise.
7530 **/
7531static int
7532lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7533{
7534 uint32_t uerr_sta_hi, uerr_sta_lo;
7535 uint32_t onlnreg0, onlnreg1;
5118 7536
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7537 /* For now, use the SLI4 device internal unrecoverable error
7538 * registers for error attention. This can be changed later.
7539 */
7540 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7541 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7542 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7543 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7544 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7545 if (uerr_sta_lo || uerr_sta_hi) {
7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7547 "1423 HBA Unrecoverable error: "
7548 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7549 "online0_reg=0x%x, online1_reg=0x%x\n",
7550 uerr_sta_lo, uerr_sta_hi,
7551 onlnreg0, onlnreg1);
7552 /* TEMP: as the driver error recover logic is not
7553 * fully developed, we just log the error message
7554 * and the device error attention action is now
7555 * temporarily disabled.
7556 */
7557 return 0;
7558 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1;
7567 }
7568 }
7569 return 0;
5120} 7570}
5121 7571
5122/** 7572/**
5123 * lpfc_sli_check_eratt - check error attention events 7573 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7574 * @phba: Pointer to HBA context.
5125 * 7575 *
5126 * This function is called form timer soft interrupt context to check HBA's 7576 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7577 * error attention register bit for error attention events.
5128 * 7578 *
 5129 * This function returns 1 when there is Error Attention in the Host Attention 7579 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7584{
5135 uint32_t ha_copy; 7585 uint32_t ha_copy;
5136 7586
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7587 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7588 * here. The brdkill function will do this.
5143 */ 7589 */
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7607 return 0;
5162 } 7608 }
5163 7609
5164 /* Read chip Host Attention (HA) register */ 7610 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7611 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7612 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7613 return 0;
7614 }
7615
7616 switch (phba->sli_rev) {
7617 case LPFC_SLI_REV2:
7618 case LPFC_SLI_REV3:
7619 /* Read chip Host Attention (HA) register */
7620 ha_copy = lpfc_sli_eratt_read(phba);
7621 break;
7622 case LPFC_SLI_REV4:
 7623 /* Read device Unrecoverable Error (UERR) registers */
7624 ha_copy = lpfc_sli4_eratt_read(phba);
7625 break;
7626 default:
7627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7628 "0299 Invalid SLI revision (%d)\n",
7629 phba->sli_rev);
7630 ha_copy = 0;
7631 break;
5186 } 7632 }
5187 spin_unlock_irq(&phba->hbalock); 7633 spin_unlock_irq(&phba->hbalock);
7634
7635 return ha_copy;
7636}
7637
7638/**
7639 * lpfc_intr_state_check - Check device state for interrupt handling
7640 * @phba: Pointer to HBA context.
7641 *
7642 * This inline routine checks whether a device or its PCI slot is in a state
7643 * that the interrupt should be handled.
7644 *
7645 * This function returns 0 if the device or the PCI slot is in a state that
7646 * interrupt should be handled, otherwise -EIO.
7647 */
7648static inline int
7649lpfc_intr_state_check(struct lpfc_hba *phba)
7650{
7651 /* If the pci channel is offline, ignore all the interrupts */
7652 if (unlikely(pci_channel_offline(phba->pcidev)))
7653 return -EIO;
7654
7655 /* Update device level interrupt statistics */
7656 phba->sli.slistat.sli_intr++;
7657
7658 /* Ignore all interrupts during initialization. */
7659 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7660 return -EIO;
7661
5188 return 0; 7662 return 0;
5189} 7663}
5190 7664
5191/** 7665/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7666 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7667 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7668 * @dev_id: The device context pointer.
5195 * 7669 *
5196 * This function is directly called from the PCI layer as an interrupt 7670 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7671 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7672 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7673 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7674 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7675 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7676 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7677 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7678 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7679 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7680 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7681 * structures.
5208 * 7682 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7683 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7684 * returns IRQ_NONE.
5211 **/ 7685 **/
5212irqreturn_t 7686irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7687lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7688{
5215 struct lpfc_hba *phba; 7689 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7690 uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7714 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7715 */
5242 if (phba->intr_type == MSIX) { 7716 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7717 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7718 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7719 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7720 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7721 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7740 * interrupt.
5272 */ 7741 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7742 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7743 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7744 return IRQ_NONE;
5276 } 7745 }
5277 7746
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7833
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7834 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7835 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7836 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7837 mbox = phba->mbox;
5369 vport = pmb->vport; 7838 vport = pmb->vport;
5370 7839
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 7903 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 7904 "0350 rc should have"
5436 "been MBX_BUSY"); 7905 "been MBX_BUSY");
5437 goto send_current_mbox; 7906 if (rc != MBX_NOT_FINISHED)
7907 goto send_current_mbox;
5438 } 7908 }
5439 } 7909 }
5440 spin_lock_irqsave( 7910 spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
5471 } 7941 }
5472 return IRQ_HANDLED; 7942 return IRQ_HANDLED;
5473 7943
5474} /* lpfc_sp_intr_handler */ 7944} /* lpfc_sli_sp_intr_handler */
5475 7945
5476/** 7946/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 7947 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 7948 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 7949 * @dev_id: The device context pointer.
5480 * 7950 *
5481 * This function is directly called from the PCI layer as an interrupt 7951 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 7952 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 7953 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 7954 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 7955 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 7956 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 7957 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring event are handled in the 7958 * process the interrupt. The SCSI FCP fast-path ring event are handled in
5489 * intrrupt context. This function is called without any lock held. It 7959 * the intrrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 7960 * It gets the hbalock to access and update SLI data structures.
5491 * 7961 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 7962 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 7963 * returns IRQ_NONE.
5494 **/ 7964 **/
5495irqreturn_t 7965irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 7966lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 7967{
5498 struct lpfc_hba *phba; 7968 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 7969 uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 7983 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 7984 */
5515 if (phba->intr_type == MSIX) { 7985 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 7986 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 7987 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 7988 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 7989 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 7990 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 7995 * any interrupt.
5531 */ 7996 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 7998 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 7999 return IRQ_NONE;
5535 } 8000 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8001 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8031 }
5567 } 8032 }
5568 return IRQ_HANDLED; 8033 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8034} /* lpfc_sli_fp_intr_handler */
5570 8035
5571/** 8036/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8037 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8038 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8039 * @dev_id: The device context pointer.
5575 * 8040 *
5576 * This function is the device-level interrupt handler called from the PCI 8041 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8042 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8043 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8044 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8045 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8046 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8047 * function is called without any lock held. It gets the hbalock to access
8048 * and update SLI data structures.
5583 * 8049 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8050 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8051 * returns IRQ_NONE.
5586 **/ 8052 **/
5587irqreturn_t 8053irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8054lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8055{
5590 struct lpfc_hba *phba; 8056 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8057 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8066 if (unlikely(!phba))
5601 return IRQ_NONE; 8067 return IRQ_NONE;
5602 8068
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8069 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8070 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8071 return IRQ_NONE;
5613 8072
5614 spin_lock(&phba->hbalock); 8073 spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8109 status2 >>= (4*LPFC_ELS_RING);
5651 8110
5652 if (status1 || (status2 & HA_RXMASK)) 8111 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8112 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8113 else
5655 sp_irq_rc = IRQ_NONE; 8114 sp_irq_rc = IRQ_NONE;
5656 8115
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8129 status2 = 0;
5671 8130
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8131 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8132 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8133 else
5675 fp_irq_rc = IRQ_NONE; 8134 fp_irq_rc = IRQ_NONE;
5676 8135
5677 /* Return device-level interrupt handling status */ 8136 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8137 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8138} /* lpfc_sli_intr_handler */
8139
8140/**
8141 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8142 * @phba: pointer to lpfc hba data structure.
8143 *
8144 * This routine is invoked by the worker thread to process all the pending
8145 * SLI4 FCP abort XRI events.
8146 **/
8147void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8148{
8149 struct lpfc_cq_event *cq_event;
8150
8151 /* First, declare the fcp xri abort event has been handled */
8152 spin_lock_irq(&phba->hbalock);
8153 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8154 spin_unlock_irq(&phba->hbalock);
8155 /* Now, handle all the fcp xri abort events */
8156 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8157 /* Get the first event from the head of the event queue */
8158 spin_lock_irq(&phba->hbalock);
8159 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8160 cq_event, struct lpfc_cq_event, list);
8161 spin_unlock_irq(&phba->hbalock);
8162 /* Notify aborted XRI for FCP work queue */
8163 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8164 /* Free the event processed back to the free pool */
8165 lpfc_sli4_cq_event_release(phba, cq_event);
8166 }
8167}
8168
8169/**
8170 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8171 * @phba: pointer to lpfc hba data structure.
8172 *
8173 * This routine is invoked by the worker thread to process all the pending
8174 * SLI4 els abort xri events.
8175 **/
8176void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8177{
8178 struct lpfc_cq_event *cq_event;
8179
8180 /* First, declare the els xri abort event has been handled */
8181 spin_lock_irq(&phba->hbalock);
8182 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8183 spin_unlock_irq(&phba->hbalock);
8184 /* Now, handle all the els xri abort events */
8185 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8186 /* Get the first event from the head of the event queue */
8187 spin_lock_irq(&phba->hbalock);
8188 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8189 cq_event, struct lpfc_cq_event, list);
8190 spin_unlock_irq(&phba->hbalock);
8191 /* Notify aborted XRI for ELS work queue */
8192 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8193 /* Free the event processed back to the free pool */
8194 lpfc_sli4_cq_event_release(phba, cq_event);
8195 }
8196}
8197
8198static void
8199lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8200 struct lpfc_iocbq *pIocbOut,
8201 struct lpfc_wcqe_complete *wcqe)
8202{
8203 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8204
8205 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8206 sizeof(struct lpfc_iocbq) - offset);
8207 memset(&pIocbIn->sli4_info, 0,
8208 sizeof(struct lpfc_sli4_rspiocb_info));
8209 /* Map WCQE parameters into irspiocb parameters */
8210 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8211 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8212 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8213 pIocbIn->iocb.un.fcpi.fcpi_parm =
8214 pIocbOut->iocb.un.fcpi.fcpi_parm -
8215 wcqe->total_data_placed;
8216 else
8217 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8218 else
8219 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8220 /* Load in additional WCQE parameters */
8221 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8222 pIocbIn->sli4_info.bfield = 0;
8223 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8224 pIocbIn->sli4_info.bfield |= LPFC_XB;
8225 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8226 pIocbIn->sli4_info.bfield |= LPFC_PV;
8227 pIocbIn->sli4_info.priority =
8228 bf_get(lpfc_wcqe_c_priority, wcqe);
8229 }
8230}
8231
8232/**
8233 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
8234 * @phba: Pointer to HBA context object.
8235 * @cqe: Pointer to mailbox completion queue entry.
8236 *
8237 * This routine process a mailbox completion queue entry with asynchrous
8238 * event.
8239 *
8240 * Return: true if work posted to worker thread, otherwise false.
8241 **/
8242static bool
8243lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8244{
8245 struct lpfc_cq_event *cq_event;
8246 unsigned long iflags;
8247
8248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8249 "0392 Async Event: word0:x%x, word1:x%x, "
8250 "word2:x%x, word3:x%x\n", mcqe->word0,
8251 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8252
8253 /* Allocate a new internal CQ_EVENT entry */
8254 cq_event = lpfc_sli4_cq_event_alloc(phba);
8255 if (!cq_event) {
8256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8257 "0394 Failed to allocate CQ_EVENT entry\n");
8258 return false;
8259 }
8260
8261 /* Move the CQE into an asynchronous event entry */
8262 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8263 spin_lock_irqsave(&phba->hbalock, iflags);
8264 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8265 /* Set the async event flag */
8266 phba->hba_flag |= ASYNC_EVENT;
8267 spin_unlock_irqrestore(&phba->hbalock, iflags);
8268
8269 return true;
8270}
8271
8272/**
8273 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8274 * @phba: Pointer to HBA context object.
8275 * @cqe: Pointer to mailbox completion queue entry.
8276 *
8277 * This routine process a mailbox completion queue entry with mailbox
8278 * completion event.
8279 *
8280 * Return: true if work posted to worker thread, otherwise false.
8281 **/
8282static bool
8283lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8284{
8285 uint32_t mcqe_status;
8286 MAILBOX_t *mbox, *pmbox;
8287 struct lpfc_mqe *mqe;
8288 struct lpfc_vport *vport;
8289 struct lpfc_nodelist *ndlp;
8290 struct lpfc_dmabuf *mp;
8291 unsigned long iflags;
8292 LPFC_MBOXQ_t *pmb;
8293 bool workposted = false;
8294 int rc;
8295
8296 /* If not a mailbox complete MCQE, out by checking mailbox consume */
8297 if (!bf_get(lpfc_trailer_completed, mcqe))
8298 goto out_no_mqe_complete;
8299
8300 /* Get the reference to the active mbox command */
8301 spin_lock_irqsave(&phba->hbalock, iflags);
8302 pmb = phba->sli.mbox_active;
8303 if (unlikely(!pmb)) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8305 "1832 No pending MBOX command to handle\n");
8306 spin_unlock_irqrestore(&phba->hbalock, iflags);
8307 goto out_no_mqe_complete;
8308 }
8309 spin_unlock_irqrestore(&phba->hbalock, iflags);
8310 mqe = &pmb->u.mqe;
8311 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8312 mbox = phba->mbox;
8313 vport = pmb->vport;
8314
8315 /* Reset heartbeat timer */
8316 phba->last_completion_time = jiffies;
8317 del_timer(&phba->sli.mbox_tmo);
8318
8319 /* Move mbox data to caller's mailbox region, do endian swapping */
8320 if (pmb->mbox_cmpl && mbox)
8321 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8322 /* Set the mailbox status with SLI4 range 0x4000 */
8323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8324 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8325 bf_set(lpfc_mqe_status, mqe,
8326 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8327
8328 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8329 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8331 "MBOX dflt rpi: status:x%x rpi:x%x",
8332 mcqe_status,
8333 pmbox->un.varWords[0], 0);
8334 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8335 mp = (struct lpfc_dmabuf *)(pmb->context1);
8336 ndlp = (struct lpfc_nodelist *)pmb->context2;
8337 /* Reg_LOGIN of dflt RPI was successful. Now lets get
8338 * RID of the PPI using the same mbox buffer.
8339 */
8340 lpfc_unreg_login(phba, vport->vpi,
8341 pmbox->un.varWords[0], pmb);
8342 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8343 pmb->context1 = mp;
8344 pmb->context2 = ndlp;
8345 pmb->vport = vport;
8346 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8347 if (rc != MBX_BUSY)
8348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8349 LOG_SLI, "0385 rc should "
8350 "have been MBX_BUSY\n");
8351 if (rc != MBX_NOT_FINISHED)
8352 goto send_current_mbox;
8353 }
8354 }
8355 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8356 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8357 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8358
8359 /* There is mailbox completion work to do */
8360 spin_lock_irqsave(&phba->hbalock, iflags);
8361 __lpfc_mbox_cmpl_put(phba, pmb);
8362 phba->work_ha |= HA_MBATT;
8363 spin_unlock_irqrestore(&phba->hbalock, iflags);
8364 workposted = true;
8365
8366send_current_mbox:
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 /* Release the mailbox command posting token */
8369 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8370 /* Setting active mailbox pointer need to be in sync to flag clear */
8371 phba->sli.mbox_active = NULL;
8372 spin_unlock_irqrestore(&phba->hbalock, iflags);
8373 /* Wake up worker thread to post the next pending mailbox command */
8374 lpfc_worker_wake_up(phba);
8375out_no_mqe_complete:
8376 if (bf_get(lpfc_trailer_consumed, mcqe))
8377 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8378 return workposted;
8379}
8380
8381/**
8382 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8383 * @phba: Pointer to HBA context object.
8384 * @cqe: Pointer to mailbox completion queue entry.
8385 *
8386 * This routine process a mailbox completion queue entry, it invokes the
8387 * proper mailbox complete handling or asynchrous event handling routine
8388 * according to the MCQE's async bit.
8389 *
8390 * Return: true if work posted to worker thread, otherwise false.
8391 **/
8392static bool
8393lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8394{
8395 struct lpfc_mcqe mcqe;
8396 bool workposted;
8397
8398 /* Copy the mailbox MCQE and convert endian order as needed */
8399 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8400
8401 /* Invoke the proper event handling routine */
8402 if (!bf_get(lpfc_trailer_async, &mcqe))
8403 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8404 else
8405 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8406 return workposted;
8407}
8408
8409/**
8410 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8411 * @phba: Pointer to HBA context object.
8412 * @wcqe: Pointer to work-queue completion queue entry.
8413 *
8414 * This routine handles an ELS work-queue completion event.
8415 *
8416 * Return: true if work posted to worker thread, otherwise false.
8417 **/
8418static bool
8419lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8420 struct lpfc_wcqe_complete *wcqe)
8421{
8422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8423 struct lpfc_iocbq *cmdiocbq;
8424 struct lpfc_iocbq *irspiocbq;
8425 unsigned long iflags;
8426 bool workposted = false;
8427
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pring->stats.iocb_event++;
8430 /* Look up the ELS command IOCB and create pseudo response IOCB */
8431 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434
8435 if (unlikely(!cmdiocbq)) {
8436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8437 "0386 ELS complete with no corresponding "
8438 "cmdiocb: iotag (%d)\n",
8439 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8440 return workposted;
8441 }
8442
8443 /* Fake the irspiocbq and copy necessary response information */
8444 irspiocbq = lpfc_sli_get_iocbq(phba);
8445 if (!irspiocbq) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8447 "0387 Failed to allocate an iocbq\n");
8448 return workposted;
8449 }
8450 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8451
8452 /* Add the irspiocb to the response IOCB work list */
8453 spin_lock_irqsave(&phba->hbalock, iflags);
8454 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8455 /* Indicate ELS ring attention */
8456 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8457 spin_unlock_irqrestore(&phba->hbalock, iflags);
8458 workposted = true;
8459
8460 return workposted;
8461}
8462
8463/**
8464 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8465 * @phba: Pointer to HBA context object.
8466 * @wcqe: Pointer to work-queue completion queue entry.
8467 *
8468 * This routine handles slow-path WQ entry comsumed event by invoking the
8469 * proper WQ release routine to the slow-path WQ.
8470 **/
8471static void
8472lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8473 struct lpfc_wcqe_release *wcqe)
8474{
8475 /* Check for the slow-path ELS work queue */
8476 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8477 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8478 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8479 else
8480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8481 "2579 Slow-path wqe consume event carries "
8482 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8483 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8484 phba->sli4_hba.els_wq->queue_id);
8485}
8486
8487/**
8488 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8489 * @phba: Pointer to HBA context object.
8490 * @cq: Pointer to a WQ completion queue.
8491 * @wcqe: Pointer to work-queue completion queue entry.
8492 *
8493 * This routine handles an XRI abort event.
8494 *
8495 * Return: true if work posted to worker thread, otherwise false.
8496 **/
8497static bool
8498lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8499 struct lpfc_queue *cq,
8500 struct sli4_wcqe_xri_aborted *wcqe)
8501{
8502 bool workposted = false;
8503 struct lpfc_cq_event *cq_event;
8504 unsigned long iflags;
8505
8506 /* Allocate a new internal CQ_EVENT entry */
8507 cq_event = lpfc_sli4_cq_event_alloc(phba);
8508 if (!cq_event) {
8509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8510 "0602 Failed to allocate CQ_EVENT entry\n");
8511 return false;
8512 }
8513
8514 /* Move the CQE into the proper xri abort event list */
8515 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8516 switch (cq->subtype) {
8517 case LPFC_FCP:
8518 spin_lock_irqsave(&phba->hbalock, iflags);
8519 list_add_tail(&cq_event->list,
8520 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8521 /* Set the fcp xri abort event flag */
8522 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8523 spin_unlock_irqrestore(&phba->hbalock, iflags);
8524 workposted = true;
8525 break;
8526 case LPFC_ELS:
8527 spin_lock_irqsave(&phba->hbalock, iflags);
8528 list_add_tail(&cq_event->list,
8529 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8530 /* Set the els xri abort event flag */
8531 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8532 spin_unlock_irqrestore(&phba->hbalock, iflags);
8533 workposted = true;
8534 break;
8535 default:
8536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8537 "0603 Invalid work queue CQE subtype (x%x)\n",
8538 cq->subtype);
8539 workposted = false;
8540 break;
8541 }
8542 return workposted;
8543}
8544
8545/**
8546 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8547 * @phba: Pointer to HBA context object.
8548 * @cq: Pointer to the completion queue.
8549 * @wcqe: Pointer to a completion queue entry.
8550 *
8551 * This routine process a slow-path work-queue completion queue entry.
8552 *
8553 * Return: true if work posted to worker thread, otherwise false.
8554 **/
8555static bool
8556lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8557 struct lpfc_cqe *cqe)
8558{
8559 struct lpfc_wcqe_complete wcqe;
8560 bool workposted = false;
8561
8562 /* Copy the work queue CQE and convert endian order if needed */
8563 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8564
8565 /* Check and process for different type of WCQE and dispatch */
8566 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8567 case CQE_CODE_COMPL_WQE:
8568 /* Process the WQ complete event */
8569 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8570 (struct lpfc_wcqe_complete *)&wcqe);
8571 break;
8572 case CQE_CODE_RELEASE_WQE:
8573 /* Process the WQ release event */
8574 lpfc_sli4_sp_handle_rel_wcqe(phba,
8575 (struct lpfc_wcqe_release *)&wcqe);
8576 break;
8577 case CQE_CODE_XRI_ABORTED:
8578 /* Process the WQ XRI abort event */
8579 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8580 (struct sli4_wcqe_xri_aborted *)&wcqe);
8581 break;
8582 default:
8583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8584 "0388 Not a valid WCQE code: x%x\n",
8585 bf_get(lpfc_wcqe_c_code, &wcqe));
8586 break;
8587 }
8588 return workposted;
8589}
8590
8591/**
8592 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8593 * @phba: Pointer to HBA context object.
8594 * @rcqe: Pointer to receive-queue completion queue entry.
8595 *
8596 * This routine process a receive-queue completion queue entry.
8597 *
8598 * Return: true if work posted to worker thread, otherwise false.
8599 **/
8600static bool
8601lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8602{
8603 struct lpfc_rcqe rcqe;
8604 bool workposted = false;
8605 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8606 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8607 struct hbq_dmabuf *dma_buf;
8608 uint32_t status;
8609 unsigned long iflags;
8610
8611 /* Copy the receive queue CQE and convert endian order if needed */
8612 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8613 lpfc_sli4_rq_release(hrq, drq);
8614 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8615 goto out;
8616 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8617 goto out;
8618
8619 status = bf_get(lpfc_rcqe_status, &rcqe);
8620 switch (status) {
8621 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8623 "2537 Receive Frame Truncated!!\n");
8624 case FC_STATUS_RQ_SUCCESS:
8625 spin_lock_irqsave(&phba->hbalock, iflags);
8626 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8627 if (!dma_buf) {
8628 spin_unlock_irqrestore(&phba->hbalock, iflags);
8629 goto out;
8630 }
8631 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
8632 /* save off the frame for the word thread to process */
8633 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8634 /* Frame received */
8635 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8636 spin_unlock_irqrestore(&phba->hbalock, iflags);
8637 workposted = true;
8638 break;
8639 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8640 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8641 /* Post more buffers if possible */
8642 spin_lock_irqsave(&phba->hbalock, iflags);
8643 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8644 spin_unlock_irqrestore(&phba->hbalock, iflags);
8645 workposted = true;
8646 break;
8647 }
8648out:
8649 return workposted;
8650
8651}
8652
8653/**
8654 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8655 * @phba: Pointer to HBA context object.
8656 * @eqe: Pointer to fast-path event queue entry.
8657 *
8658 * This routine process a event queue entry from the slow-path event queue.
8659 * It will check the MajorCode and MinorCode to determine this is for a
8660 * completion event on a completion queue, if not, an error shall be logged
8661 * and just return. Otherwise, it will get to the corresponding completion
8662 * queue and process all the entries on that completion queue, rearm the
8663 * completion queue, and then return.
8664 *
8665 **/
8666static void
8667lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8668{
8669 struct lpfc_queue *cq = NULL, *childq, *speq;
8670 struct lpfc_cqe *cqe;
8671 bool workposted = false;
8672 int ecount = 0;
8673 uint16_t cqid;
8674
8675 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8676 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8678 "0359 Not a valid slow-path completion "
8679 "event: majorcode=x%x, minorcode=x%x\n",
8680 bf_get(lpfc_eqe_major_code, eqe),
8681 bf_get(lpfc_eqe_minor_code, eqe));
8682 return;
8683 }
8684
8685 /* Get the reference to the corresponding CQ */
8686 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8687
8688 /* Search for completion queue pointer matching this cqid */
8689 speq = phba->sli4_hba.sp_eq;
8690 list_for_each_entry(childq, &speq->child_list, list) {
8691 if (childq->queue_id == cqid) {
8692 cq = childq;
8693 break;
8694 }
8695 }
8696 if (unlikely(!cq)) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8698 "0365 Slow-path CQ identifier (%d) does "
8699 "not exist\n", cqid);
8700 return;
8701 }
8702
8703 /* Process all the entries to the CQ */
8704 switch (cq->type) {
8705 case LPFC_MCQ:
8706 while ((cqe = lpfc_sli4_cq_get(cq))) {
8707 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8708 if (!(++ecount % LPFC_GET_QE_REL_INT))
8709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8710 }
8711 break;
8712 case LPFC_WCQ:
8713 while ((cqe = lpfc_sli4_cq_get(cq))) {
8714 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8715 if (!(++ecount % LPFC_GET_QE_REL_INT))
8716 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8717 }
8718 break;
8719 case LPFC_RCQ:
8720 while ((cqe = lpfc_sli4_cq_get(cq))) {
8721 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8722 if (!(++ecount % LPFC_GET_QE_REL_INT))
8723 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8724 }
8725 break;
8726 default:
8727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8728 "0370 Invalid completion queue type (%d)\n",
8729 cq->type);
8730 return;
8731 }
8732
8733 /* Catch the no cq entry condition, log an error */
8734 if (unlikely(ecount == 0))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "0371 No entry from the CQ: identifier "
8737 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8738
8739 /* In any case, flash and re-arm the RCQ */
8740 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8741
8742 /* wake up worker thread if there are works to be done */
8743 if (workposted)
8744 lpfc_worker_wake_up(phba);
8745}
8746
8747/**
8748 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8749 * @eqe: Pointer to fast-path completion queue entry.
8750 *
8751 * This routine process a fast-path work queue completion entry from fast-path
8752 * event queue for FCP command response completion.
8753 **/
8754static void
8755lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8756 struct lpfc_wcqe_complete *wcqe)
8757{
8758 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8759 struct lpfc_iocbq *cmdiocbq;
8760 struct lpfc_iocbq irspiocbq;
8761 unsigned long iflags;
8762
8763 spin_lock_irqsave(&phba->hbalock, iflags);
8764 pring->stats.iocb_event++;
8765 spin_unlock_irqrestore(&phba->hbalock, iflags);
8766
8767 /* Check for response status */
8768 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8769 /* If resource errors reported from HBA, reduce queue
8770 * depth of the SCSI device.
8771 */
8772 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8773 IOSTAT_LOCAL_REJECT) &&
8774 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8775 phba->lpfc_rampdown_queue_depth(phba);
8776 }
8777 /* Log the error status */
8778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8779 "0373 FCP complete error: status=x%x, "
8780 "hw_status=x%x, total_data_specified=%d, "
8781 "parameter=x%x, word3=x%x\n",
8782 bf_get(lpfc_wcqe_c_status, wcqe),
8783 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8784 wcqe->total_data_placed, wcqe->parameter,
8785 wcqe->word3);
8786 }
8787
8788 /* Look up the FCP command IOCB and create pseudo response IOCB */
8789 spin_lock_irqsave(&phba->hbalock, iflags);
8790 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8791 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8792 spin_unlock_irqrestore(&phba->hbalock, iflags);
8793 if (unlikely(!cmdiocbq)) {
8794 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8795 "0374 FCP complete with no corresponding "
8796 "cmdiocb: iotag (%d)\n",
8797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8798 return;
8799 }
8800 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8802 "0375 FCP cmdiocb not callback function "
8803 "iotag: (%d)\n",
8804 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8805 return;
8806 }
8807
8808 /* Fake the irspiocb and copy necessary response information */
8809 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8810
8811 /* Pass the cmd_iocb and the rsp state to the upper layer */
8812 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8813}
8814
8815/**
8816 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8817 * @phba: Pointer to HBA context object.
8818 * @cq: Pointer to completion queue.
8819 * @wcqe: Pointer to work-queue completion queue entry.
8820 *
8821 * This routine handles an fast-path WQ entry comsumed event by invoking the
8822 * proper WQ release routine to the slow-path WQ.
8823 **/
8824static void
8825lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8826 struct lpfc_wcqe_release *wcqe)
8827{
8828 struct lpfc_queue *childwq;
8829 bool wqid_matched = false;
8830 uint16_t fcp_wqid;
8831
8832 /* Check for fast-path FCP work queue release */
8833 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8834 list_for_each_entry(childwq, &cq->child_list, list) {
8835 if (childwq->queue_id == fcp_wqid) {
8836 lpfc_sli4_wq_release(childwq,
8837 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8838 wqid_matched = true;
8839 break;
8840 }
8841 }
8842 /* Report warning log message if no match found */
8843 if (wqid_matched != true)
8844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8845 "2580 Fast-path wqe consume event carries "
8846 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8847}
8848
8849/**
8850 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8851 * @cq: Pointer to the completion queue.
8852 * @eqe: Pointer to fast-path completion queue entry.
8853 *
8854 * This routine process a fast-path work queue completion entry from fast-path
8855 * event queue for FCP command response completion.
8856 **/
8857static int
8858lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8859 struct lpfc_cqe *cqe)
8860{
8861 struct lpfc_wcqe_release wcqe;
8862 bool workposted = false;
8863
8864 /* Copy the work queue CQE and convert endian order if needed */
8865 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8866
8867 /* Check and process for different type of WCQE and dispatch */
8868 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8869 case CQE_CODE_COMPL_WQE:
8870 /* Process the WQ complete event */
8871 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8872 (struct lpfc_wcqe_complete *)&wcqe);
8873 break;
8874 case CQE_CODE_RELEASE_WQE:
8875 /* Process the WQ release event */
8876 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8877 (struct lpfc_wcqe_release *)&wcqe);
8878 break;
8879 case CQE_CODE_XRI_ABORTED:
8880 /* Process the WQ XRI abort event */
8881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8882 (struct sli4_wcqe_xri_aborted *)&wcqe);
8883 break;
8884 default:
8885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8886 "0144 Not a valid WCQE code: x%x\n",
8887 bf_get(lpfc_wcqe_c_code, &wcqe));
8888 break;
8889 }
8890 return workposted;
8891}
8892
8893/**
8894 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8895 * @phba: Pointer to HBA context object.
8896 * @eqe: Pointer to fast-path event queue entry.
8897 *
8898 * This routine process a event queue entry from the fast-path event queue.
8899 * It will check the MajorCode and MinorCode to determine this is for a
8900 * completion event on a completion queue, if not, an error shall be logged
8901 * and just return. Otherwise, it will get to the corresponding completion
8902 * queue and process all the entries on the completion queue, rearm the
8903 * completion queue, and then return.
8904 **/
8905static void
8906lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
8907 uint32_t fcp_cqidx)
8908{
8909 struct lpfc_queue *cq;
8910 struct lpfc_cqe *cqe;
8911 bool workposted = false;
8912 uint16_t cqid;
8913 int ecount = 0;
8914
8915 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
8916 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8918 "0366 Not a valid fast-path completion "
8919 "event: majorcode=x%x, minorcode=x%x\n",
8920 bf_get(lpfc_eqe_major_code, eqe),
8921 bf_get(lpfc_eqe_minor_code, eqe));
8922 return;
8923 }
8924
8925 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
8926 if (unlikely(!cq)) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8928 "0367 Fast-path completion queue does not "
8929 "exist\n");
8930 return;
8931 }
8932
8933 /* Get the reference to the corresponding CQ */
8934 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8935 if (unlikely(cqid != cq->queue_id)) {
8936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8937 "0368 Miss-matched fast-path completion "
8938 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
8939 cqid, cq->queue_id);
8940 return;
8941 }
8942
8943 /* Process all the entries to the CQ */
8944 while ((cqe = lpfc_sli4_cq_get(cq))) {
8945 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
8946 if (!(++ecount % LPFC_GET_QE_REL_INT))
8947 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8948 }
8949
8950 /* Catch the no cq entry condition */
8951 if (unlikely(ecount == 0))
8952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8953 "0369 No entry from fast-path completion "
8954 "queue fcpcqid=%d\n", cq->queue_id);
8955
8956 /* In any case, flash and re-arm the CQ */
8957 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8958
8959 /* wake up worker thread if there are works to be done */
8960 if (workposted)
8961 lpfc_worker_wake_up(phba);
8962}
8963
8964static void
8965lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
8966{
8967 struct lpfc_eqe *eqe;
8968
8969 /* walk all the EQ entries and drop on the floor */
8970 while ((eqe = lpfc_sli4_eq_get(eq)))
8971 ;
8972
8973 /* Clear and re-arm the EQ */
8974 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
8975}
8976
8977/**
8978 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
8979 * @irq: Interrupt number.
8980 * @dev_id: The device context pointer.
8981 *
8982 * This function is directly called from the PCI layer as an interrupt
8983 * service routine when device with SLI-4 interface spec is enabled with
8984 * MSI-X multi-message interrupt mode and there are slow-path events in
8985 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
8986 * interrupt mode, this function is called as part of the device-level
8987 * interrupt handler. When the PCI slot is in error recovery or the HBA is
8988 * undergoing initialization, the interrupt handler will not process the
8989 * interrupt. The link attention and ELS ring attention events are handled
8990 * by the worker thread. The interrupt handler signals the worker thread
8991 * and returns for these events. This function is called without any lock
8992 * held. It gets the hbalock to access and update SLI data structures.
8993 *
8994 * This function returns IRQ_HANDLED when interrupt is handled else it
8995 * returns IRQ_NONE.
8996 **/
8997irqreturn_t
8998lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
8999{
9000 struct lpfc_hba *phba;
9001 struct lpfc_queue *speq;
9002 struct lpfc_eqe *eqe;
9003 unsigned long iflag;
9004 int ecount = 0;
9005
9006 /*
9007 * Get the driver's phba structure from the dev_id
9008 */
9009 phba = (struct lpfc_hba *)dev_id;
9010
9011 if (unlikely(!phba))
9012 return IRQ_NONE;
9013
9014 /* Get to the EQ struct associated with this vector */
9015 speq = phba->sli4_hba.sp_eq;
9016
9017 /* Check device state for handling interrupt */
9018 if (unlikely(lpfc_intr_state_check(phba))) {
9019 /* Check again for link_state with lock held */
9020 spin_lock_irqsave(&phba->hbalock, iflag);
9021 if (phba->link_state < LPFC_LINK_DOWN)
9022 /* Flush, clear interrupt, and rearm the EQ */
9023 lpfc_sli4_eq_flush(phba, speq);
9024 spin_unlock_irqrestore(&phba->hbalock, iflag);
9025 return IRQ_NONE;
9026 }
9027
9028 /*
9029 * Process all the event on FCP slow-path EQ
9030 */
9031 while ((eqe = lpfc_sli4_eq_get(speq))) {
9032 lpfc_sli4_sp_handle_eqe(phba, eqe);
9033 if (!(++ecount % LPFC_GET_QE_REL_INT))
9034 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9035 }
9036
9037 /* Always clear and re-arm the slow-path EQ */
9038 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9039
9040 /* Catch the no cq entry condition */
9041 if (unlikely(ecount == 0)) {
9042 if (phba->intr_type == MSIX)
9043 /* MSI-X treated interrupt served as no EQ share INT */
9044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9045 "0357 MSI-X interrupt with no EQE\n");
9046 else
9047 /* Non MSI-X treated on interrupt as EQ share INT */
9048 return IRQ_NONE;
9049 }
9050
9051 return IRQ_HANDLED;
9052} /* lpfc_sli4_sp_intr_handler */
9053
9054/**
9055 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9056 * @irq: Interrupt number.
9057 * @dev_id: The device context pointer.
9058 *
9059 * This function is directly called from the PCI layer as an interrupt
9060 * service routine when device with SLI-4 interface spec is enabled with
9061 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9062 * ring event in the HBA. However, when the device is enabled with either
9063 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9064 * device-level interrupt handler. When the PCI slot is in error recovery
9065 * or the HBA is undergoing initialization, the interrupt handler will not
9066 * process the interrupt. The SCSI FCP fast-path ring event are handled in
9067 * the intrrupt context. This function is called without any lock held.
9068 * It gets the hbalock to access and update SLI data structures. Note that,
9069 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
9070 * equal to that of FCP CQ index.
9071 *
9072 * This function returns IRQ_HANDLED when interrupt is handled else it
9073 * returns IRQ_NONE.
9074 **/
9075irqreturn_t
9076lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9077{
9078 struct lpfc_hba *phba;
9079 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9080 struct lpfc_queue *fpeq;
9081 struct lpfc_eqe *eqe;
9082 unsigned long iflag;
9083 int ecount = 0;
9084 uint32_t fcp_eqidx;
9085
9086 /* Get the driver's phba structure from the dev_id */
9087 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9088 phba = fcp_eq_hdl->phba;
9089 fcp_eqidx = fcp_eq_hdl->idx;
9090
9091 if (unlikely(!phba))
9092 return IRQ_NONE;
9093
9094 /* Get to the EQ struct associated with this vector */
9095 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9096
9097 /* Check device state for handling interrupt */
9098 if (unlikely(lpfc_intr_state_check(phba))) {
9099 /* Check again for link_state with lock held */
9100 spin_lock_irqsave(&phba->hbalock, iflag);
9101 if (phba->link_state < LPFC_LINK_DOWN)
9102 /* Flush, clear interrupt, and rearm the EQ */
9103 lpfc_sli4_eq_flush(phba, fpeq);
9104 spin_unlock_irqrestore(&phba->hbalock, iflag);
9105 return IRQ_NONE;
9106 }
9107
9108 /*
9109 * Process all the event on FCP fast-path EQ
9110 */
9111 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9112 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9113 if (!(++ecount % LPFC_GET_QE_REL_INT))
9114 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9115 }
9116
9117 /* Always clear and re-arm the fast-path EQ */
9118 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9119
9120 if (unlikely(ecount == 0)) {
9121 if (phba->intr_type == MSIX)
9122 /* MSI-X treated interrupt served as no EQ share INT */
9123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9124 "0358 MSI-X interrupt with no EQE\n");
9125 else
9126 /* Non MSI-X treated on interrupt as EQ share INT */
9127 return IRQ_NONE;
9128 }
9129
9130 return IRQ_HANDLED;
9131} /* lpfc_sli4_fp_intr_handler */
9132
9133/**
9134 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9135 * @irq: Interrupt number.
9136 * @dev_id: The device context pointer.
9137 *
9138 * This function is the device-level interrupt handler to device with SLI-4
9139 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9140 * interrupt mode is enabled and there is an event in the HBA which requires
9141 * driver attention. This function invokes the slow-path interrupt attention
9142 * handling function and fast-path interrupt attention handling function in
9143 * turn to process the relevant HBA attention events. This function is called
9144 * without any lock held. It gets the hbalock to access and update SLI data
9145 * structures.
9146 *
9147 * This function returns IRQ_HANDLED when interrupt is handled, else it
9148 * returns IRQ_NONE.
9149 **/
9150irqreturn_t
9151lpfc_sli4_intr_handler(int irq, void *dev_id)
9152{
9153 struct lpfc_hba *phba;
9154 irqreturn_t sp_irq_rc, fp_irq_rc;
9155 bool fp_handled = false;
9156 uint32_t fcp_eqidx;
9157
9158 /* Get the driver's phba structure from the dev_id */
9159 phba = (struct lpfc_hba *)dev_id;
9160
9161 if (unlikely(!phba))
9162 return IRQ_NONE;
9163
9164 /*
9165 * Invokes slow-path host attention interrupt handling as appropriate.
9166 */
9167 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9168
9169 /*
9170 * Invoke fast-path host attention interrupt handling as appropriate.
9171 */
9172 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9173 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9174 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9175 if (fp_irq_rc == IRQ_HANDLED)
9176 fp_handled |= true;
9177 }
9178
9179 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9180} /* lpfc_sli4_intr_handler */
9181
9182/**
9183 * lpfc_sli4_queue_free - free a queue structure and associated memory
9184 * @queue: The queue structure to free.
9185 *
9186 * This function frees a queue structure and the DMAable memeory used for
9187 * the host resident queue. This function must be called after destroying the
9188 * queue on the HBA.
9189 **/
9190void
9191lpfc_sli4_queue_free(struct lpfc_queue *queue)
9192{
9193 struct lpfc_dmabuf *dmabuf;
9194
9195 if (!queue)
9196 return;
9197
9198 while (!list_empty(&queue->page_list)) {
9199 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9200 list);
9201 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9202 dmabuf->virt, dmabuf->phys);
9203 kfree(dmabuf);
9204 }
9205 kfree(queue);
9206 return;
9207}
9208
9209/**
9210 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9211 * @phba: The HBA that this queue is being created on.
9212 * @entry_size: The size of each queue entry for this queue.
9213 * @entry count: The number of entries that this queue will handle.
9214 *
9215 * This function allocates a queue structure and the DMAable memory used for
9216 * the host resident queue. This function must be called before creating the
9217 * queue on the HBA.
9218 **/
9219struct lpfc_queue *
9220lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9221 uint32_t entry_count)
9222{
9223 struct lpfc_queue *queue;
9224 struct lpfc_dmabuf *dmabuf;
9225 int x, total_qe_count;
9226 void *dma_pointer;
9227
9228
9229 queue = kzalloc(sizeof(struct lpfc_queue) +
9230 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9231 if (!queue)
9232 return NULL;
9233 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9234 INIT_LIST_HEAD(&queue->list);
9235 INIT_LIST_HEAD(&queue->page_list);
9236 INIT_LIST_HEAD(&queue->child_list);
9237 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9238 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9239 if (!dmabuf)
9240 goto out_fail;
9241 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9242 PAGE_SIZE, &dmabuf->phys,
9243 GFP_KERNEL);
9244 if (!dmabuf->virt) {
9245 kfree(dmabuf);
9246 goto out_fail;
9247 }
9248 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */
9251 dma_pointer = dmabuf->virt;
9252 for (; total_qe_count < entry_count &&
9253 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9254 total_qe_count++, dma_pointer += entry_size) {
9255 queue->qe[total_qe_count].address = dma_pointer;
9256 }
9257 }
9258 queue->entry_size = entry_size;
9259 queue->entry_count = entry_count;
9260 queue->phba = phba;
9261
9262 return queue;
9263out_fail:
9264 lpfc_sli4_queue_free(queue);
9265 return NULL;
9266}
9267
9268/**
9269 * lpfc_eq_create - Create an Event Queue on the HBA
9270 * @phba: HBA structure that indicates port to create a queue on.
9271 * @eq: The queue structure to use to create the event queue.
9272 * @imax: The maximum interrupt per second limit.
9273 *
9274 * This function creates an event queue, as detailed in @eq, on a port,
9275 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9276 *
9277 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9278 * is used to get the entry count and entry size that are necessary to
9279 * determine the number of pages to allocate and use for this queue. This
9280 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9281 * event queue. This function is asynchronous and will wait for the mailbox
9282 * command to finish before continuing.
9283 *
9284 * On success this function will return a zero. If unable to allocate enough
9285 * memory this function will return ENOMEM. If the queue create mailbox command
9286 * fails this function will return ENXIO.
9287 **/
9288uint32_t
9289lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9290{
9291 struct lpfc_mbx_eq_create *eq_create;
9292 LPFC_MBOXQ_t *mbox;
9293 int rc, length, status = 0;
9294 struct lpfc_dmabuf *dmabuf;
9295 uint32_t shdr_status, shdr_add_status;
9296 union lpfc_sli4_cfg_shdr *shdr;
9297 uint16_t dmult;
9298
9299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9300 if (!mbox)
9301 return -ENOMEM;
9302 length = (sizeof(struct lpfc_mbx_eq_create) -
9303 sizeof(struct lpfc_sli4_cfg_mhdr));
9304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9305 LPFC_MBOX_OPCODE_EQ_CREATE,
9306 length, LPFC_SLI4_MBX_EMBED);
9307 eq_create = &mbox->u.mqe.un.eq_create;
9308 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9309 eq->page_count);
9310 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9311 LPFC_EQE_SIZE);
9312 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9313 /* Calculate delay multiper from maximum interrupt per second */
9314 dmult = LPFC_DMULT_CONST/imax - 1;
9315 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9316 dmult);
9317 switch (eq->entry_count) {
9318 default:
9319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9320 "0360 Unsupported EQ count. (%d)\n",
9321 eq->entry_count);
9322 if (eq->entry_count < 256)
9323 return -EINVAL;
9324 /* otherwise default to smallest count (drop through) */
9325 case 256:
9326 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9327 LPFC_EQ_CNT_256);
9328 break;
9329 case 512:
9330 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9331 LPFC_EQ_CNT_512);
9332 break;
9333 case 1024:
9334 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9335 LPFC_EQ_CNT_1024);
9336 break;
9337 case 2048:
9338 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9339 LPFC_EQ_CNT_2048);
9340 break;
9341 case 4096:
9342 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9343 LPFC_EQ_CNT_4096);
9344 break;
9345 }
9346 list_for_each_entry(dmabuf, &eq->page_list, list) {
9347 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9348 putPaddrLow(dmabuf->phys);
9349 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9350 putPaddrHigh(dmabuf->phys);
9351 }
9352 mbox->vport = phba->pport;
9353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9354 mbox->context1 = NULL;
9355 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9356 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9359 if (shdr_status || shdr_add_status || rc) {
9360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9361 "2500 EQ_CREATE mailbox failed with "
9362 "status x%x add_status x%x, mbx status x%x\n",
9363 shdr_status, shdr_add_status, rc);
9364 status = -ENXIO;
9365 }
9366 eq->type = LPFC_EQ;
9367 eq->subtype = LPFC_NONE;
9368 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9369 if (eq->queue_id == 0xFFFF)
9370 status = -ENXIO;
9371 eq->host_index = 0;
9372 eq->hba_index = 0;
9373
9374 if (rc != MBX_TIMEOUT)
9375 mempool_free(mbox, phba->mbox_mem_pool);
9376 return status;
9377}
9378
9379/**
9380 * lpfc_cq_create - Create a Completion Queue on the HBA
9381 * @phba: HBA structure that indicates port to create a queue on.
9382 * @cq: The queue structure to use to create the completion queue.
9383 * @eq: The event queue to bind this completion queue to.
9384 *
9385 * This function creates a completion queue, as detailed in @wq, on a port,
9386 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9387 *
9388 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9389 * is used to get the entry count and entry size that are necessary to
9390 * determine the number of pages to allocate and use for this queue. The @eq
9391 * is used to indicate which event queue to bind this completion queue to. This
9392 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9393 * completion queue. This function is asynchronous and will wait for the mailbox
9394 * command to finish before continuing.
9395 *
9396 * On success this function will return a zero. If unable to allocate enough
9397 * memory this function will return ENOMEM. If the queue create mailbox command
9398 * fails this function will return ENXIO.
9399 **/
9400uint32_t
9401lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9402 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9403{
9404 struct lpfc_mbx_cq_create *cq_create;
9405 struct lpfc_dmabuf *dmabuf;
9406 LPFC_MBOXQ_t *mbox;
9407 int rc, length, status = 0;
9408 uint32_t shdr_status, shdr_add_status;
9409 union lpfc_sli4_cfg_shdr *shdr;
9410
9411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9412 if (!mbox)
9413 return -ENOMEM;
9414 length = (sizeof(struct lpfc_mbx_cq_create) -
9415 sizeof(struct lpfc_sli4_cfg_mhdr));
9416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9417 LPFC_MBOX_OPCODE_CQ_CREATE,
9418 length, LPFC_SLI4_MBX_EMBED);
9419 cq_create = &mbox->u.mqe.un.cq_create;
9420 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9421 cq->page_count);
9422 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9423 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9424 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9425 switch (cq->entry_count) {
9426 default:
9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9428 "0361 Unsupported CQ count. (%d)\n",
9429 cq->entry_count);
9430 if (cq->entry_count < 256)
9431 return -EINVAL;
9432 /* otherwise default to smallest count (drop through) */
9433 case 256:
9434 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9435 LPFC_CQ_CNT_256);
9436 break;
9437 case 512:
9438 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9439 LPFC_CQ_CNT_512);
9440 break;
9441 case 1024:
9442 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9443 LPFC_CQ_CNT_1024);
9444 break;
9445 }
9446 list_for_each_entry(dmabuf, &cq->page_list, list) {
9447 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9448 putPaddrLow(dmabuf->phys);
9449 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9450 putPaddrHigh(dmabuf->phys);
9451 }
9452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9453
9454 /* The IOCTL status is embedded in the mailbox subheader. */
9455 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9458 if (shdr_status || shdr_add_status || rc) {
9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460 "2501 CQ_CREATE mailbox failed with "
9461 "status x%x add_status x%x, mbx status x%x\n",
9462 shdr_status, shdr_add_status, rc);
9463 status = -ENXIO;
9464 goto out;
9465 }
9466 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9467 if (cq->queue_id == 0xFFFF) {
9468 status = -ENXIO;
9469 goto out;
9470 }
9471 /* link the cq onto the parent eq child list */
9472 list_add_tail(&cq->list, &eq->child_list);
9473 /* Set up completion queue's type and subtype */
9474 cq->type = type;
9475 cq->subtype = subtype;
9476 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9477 cq->host_index = 0;
9478 cq->hba_index = 0;
9479out:
9480
9481 if (rc != MBX_TIMEOUT)
9482 mempool_free(mbox, phba->mbox_mem_pool);
9483 return status;
9484}
9485
9486/**
9487 * lpfc_mq_create - Create a mailbox Queue on the HBA
9488 * @phba: HBA structure that indicates port to create a queue on.
9489 * @mq: The queue structure to use to create the mailbox queue.
9490 *
9491 * This function creates a mailbox queue, as detailed in @mq, on a port,
9492 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9493 *
9494 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9495 * is used to get the entry count and entry size that are necessary to
9496 * determine the number of pages to allocate and use for this queue. This
9497 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9498 * mailbox queue. This function is asynchronous and will wait for the mailbox
9499 * command to finish before continuing.
9500 *
9501 * On success this function will return a zero. If unable to allocate enough
9502 * memory this function will return ENOMEM. If the queue create mailbox command
9503 * fails this function will return ENXIO.
9504 **/
9505uint32_t
9506lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9507 struct lpfc_queue *cq, uint32_t subtype)
9508{
9509 struct lpfc_mbx_mq_create *mq_create;
9510 struct lpfc_dmabuf *dmabuf;
9511 LPFC_MBOXQ_t *mbox;
9512 int rc, length, status = 0;
9513 uint32_t shdr_status, shdr_add_status;
9514 union lpfc_sli4_cfg_shdr *shdr;
9515
9516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9517 if (!mbox)
9518 return -ENOMEM;
9519 length = (sizeof(struct lpfc_mbx_mq_create) -
9520 sizeof(struct lpfc_sli4_cfg_mhdr));
9521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9522 LPFC_MBOX_OPCODE_MQ_CREATE,
9523 length, LPFC_SLI4_MBX_EMBED);
9524 mq_create = &mbox->u.mqe.un.mq_create;
9525 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9526 mq->page_count);
9527 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9528 cq->queue_id);
9529 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9530 switch (mq->entry_count) {
9531 default:
9532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9533 "0362 Unsupported MQ count. (%d)\n",
9534 mq->entry_count);
9535 if (mq->entry_count < 16)
9536 return -EINVAL;
9537 /* otherwise default to smallest count (drop through) */
9538 case 16:
9539 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9540 LPFC_MQ_CNT_16);
9541 break;
9542 case 32:
9543 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9544 LPFC_MQ_CNT_32);
9545 break;
9546 case 64:
9547 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9548 LPFC_MQ_CNT_64);
9549 break;
9550 case 128:
9551 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9552 LPFC_MQ_CNT_128);
9553 break;
9554 }
9555 list_for_each_entry(dmabuf, &mq->page_list, list) {
9556 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9557 putPaddrLow(dmabuf->phys);
9558 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9559 putPaddrHigh(dmabuf->phys);
9560 }
9561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9562 /* The IOCTL status is embedded in the mailbox subheader. */
9563 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9566 if (shdr_status || shdr_add_status || rc) {
9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9568 "2502 MQ_CREATE mailbox failed with "
9569 "status x%x add_status x%x, mbx status x%x\n",
9570 shdr_status, shdr_add_status, rc);
9571 status = -ENXIO;
9572 goto out;
9573 }
9574 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9575 if (mq->queue_id == 0xFFFF) {
9576 status = -ENXIO;
9577 goto out;
9578 }
9579 mq->type = LPFC_MQ;
9580 mq->subtype = subtype;
9581 mq->host_index = 0;
9582 mq->hba_index = 0;
9583
9584 /* link the mq onto the parent cq child list */
9585 list_add_tail(&mq->list, &cq->child_list);
9586out:
9587 if (rc != MBX_TIMEOUT)
9588 mempool_free(mbox, phba->mbox_mem_pool);
9589 return status;
9590}
9591
9592/**
9593 * lpfc_wq_create - Create a Work Queue on the HBA
9594 * @phba: HBA structure that indicates port to create a queue on.
9595 * @wq: The queue structure to use to create the work queue.
9596 * @cq: The completion queue to bind this work queue to.
9597 * @subtype: The subtype of the work queue indicating its functionality.
9598 *
9599 * This function creates a work queue, as detailed in @wq, on a port, described
9600 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9601 *
9602 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9603 * is used to get the entry count and entry size that are necessary to
9604 * determine the number of pages to allocate and use for this queue. The @cq
9605 * is used to indicate which completion queue to bind this work queue to. This
9606 * function will send the WQ_CREATE mailbox command to the HBA to setup the
9607 * work queue. This function is asynchronous and will wait for the mailbox
9608 * command to finish before continuing.
9609 *
9610 * On success this function will return a zero. If unable to allocate enough
9611 * memory this function will return ENOMEM. If the queue create mailbox command
9612 * fails this function will return ENXIO.
9613 **/
9614uint32_t
9615lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9616 struct lpfc_queue *cq, uint32_t subtype)
9617{
9618 struct lpfc_mbx_wq_create *wq_create;
9619 struct lpfc_dmabuf *dmabuf;
9620 LPFC_MBOXQ_t *mbox;
9621 int rc, length, status = 0;
9622 uint32_t shdr_status, shdr_add_status;
9623 union lpfc_sli4_cfg_shdr *shdr;
9624
9625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9626 if (!mbox)
9627 return -ENOMEM;
9628 length = (sizeof(struct lpfc_mbx_wq_create) -
9629 sizeof(struct lpfc_sli4_cfg_mhdr));
9630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9631 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9632 length, LPFC_SLI4_MBX_EMBED);
9633 wq_create = &mbox->u.mqe.un.wq_create;
9634 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9635 wq->page_count);
9636 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9637 cq->queue_id);
9638 list_for_each_entry(dmabuf, &wq->page_list, list) {
9639 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9640 putPaddrLow(dmabuf->phys);
9641 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9642 putPaddrHigh(dmabuf->phys);
9643 }
9644 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9645 /* The IOCTL status is embedded in the mailbox subheader. */
9646 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9649 if (shdr_status || shdr_add_status || rc) {
9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9651 "2503 WQ_CREATE mailbox failed with "
9652 "status x%x add_status x%x, mbx status x%x\n",
9653 shdr_status, shdr_add_status, rc);
9654 status = -ENXIO;
9655 goto out;
9656 }
9657 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9658 if (wq->queue_id == 0xFFFF) {
9659 status = -ENXIO;
9660 goto out;
9661 }
9662 wq->type = LPFC_WQ;
9663 wq->subtype = subtype;
9664 wq->host_index = 0;
9665 wq->hba_index = 0;
9666
9667 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list);
9669out:
9670 if (rc == MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status;
9673}
9674
9675/**
9676 * lpfc_rq_create - Create a Receive Queue on the HBA
9677 * @phba: HBA structure that indicates port to create a queue on.
9678 * @hrq: The queue structure to use to create the header receive queue.
9679 * @drq: The queue structure to use to create the data receive queue.
9680 * @cq: The completion queue to bind this work queue to.
9681 *
9682 * This function creates a receive buffer queue pair , as detailed in @hrq and
9683 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9684 * to the HBA.
9685 *
9686 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
9687 * struct is used to get the entry count that is necessary to determine the
9688 * number of pages to use for this queue. The @cq is used to indicate which
9689 * completion queue to bind received buffers that are posted to these queues to.
9690 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
9691 * receive queue pair. This function is asynchronous and will wait for the
9692 * mailbox command to finish before continuing.
9693 *
9694 * On success this function will return a zero. If unable to allocate enough
9695 * memory this function will return ENOMEM. If the queue create mailbox command
9696 * fails this function will return ENXIO.
9697 **/
9698uint32_t
9699lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9700 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9701{
9702 struct lpfc_mbx_rq_create *rq_create;
9703 struct lpfc_dmabuf *dmabuf;
9704 LPFC_MBOXQ_t *mbox;
9705 int rc, length, status = 0;
9706 uint32_t shdr_status, shdr_add_status;
9707 union lpfc_sli4_cfg_shdr *shdr;
9708
9709 if (hrq->entry_count != drq->entry_count)
9710 return -EINVAL;
9711 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9712 if (!mbox)
9713 return -ENOMEM;
9714 length = (sizeof(struct lpfc_mbx_rq_create) -
9715 sizeof(struct lpfc_sli4_cfg_mhdr));
9716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9717 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9718 length, LPFC_SLI4_MBX_EMBED);
9719 rq_create = &mbox->u.mqe.un.rq_create;
9720 switch (hrq->entry_count) {
9721 default:
9722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9723 "2535 Unsupported RQ count. (%d)\n",
9724 hrq->entry_count);
9725 if (hrq->entry_count < 512)
9726 return -EINVAL;
9727 /* otherwise default to smallest count (drop through) */
9728 case 512:
9729 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9730 LPFC_RQ_RING_SIZE_512);
9731 break;
9732 case 1024:
9733 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9734 LPFC_RQ_RING_SIZE_1024);
9735 break;
9736 case 2048:
9737 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9738 LPFC_RQ_RING_SIZE_2048);
9739 break;
9740 case 4096:
9741 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9742 LPFC_RQ_RING_SIZE_4096);
9743 break;
9744 }
9745 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9746 cq->queue_id);
9747 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9748 hrq->page_count);
9749 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9750 LPFC_HDR_BUF_SIZE);
9751 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9752 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9753 putPaddrLow(dmabuf->phys);
9754 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9755 putPaddrHigh(dmabuf->phys);
9756 }
9757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9758 /* The IOCTL status is embedded in the mailbox subheader. */
9759 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9760 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9761 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9762 if (shdr_status || shdr_add_status || rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "2504 RQ_CREATE mailbox failed with "
9765 "status x%x add_status x%x, mbx status x%x\n",
9766 shdr_status, shdr_add_status, rc);
9767 status = -ENXIO;
9768 goto out;
9769 }
9770 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9771 if (hrq->queue_id == 0xFFFF) {
9772 status = -ENXIO;
9773 goto out;
9774 }
9775 hrq->type = LPFC_HRQ;
9776 hrq->subtype = subtype;
9777 hrq->host_index = 0;
9778 hrq->hba_index = 0;
9779
9780 /* now create the data queue */
9781 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9782 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9783 length, LPFC_SLI4_MBX_EMBED);
9784 switch (drq->entry_count) {
9785 default:
9786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9787 "2536 Unsupported RQ count. (%d)\n",
9788 drq->entry_count);
9789 if (drq->entry_count < 512)
9790 return -EINVAL;
9791 /* otherwise default to smallest count (drop through) */
9792 case 512:
9793 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9794 LPFC_RQ_RING_SIZE_512);
9795 break;
9796 case 1024:
9797 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9798 LPFC_RQ_RING_SIZE_1024);
9799 break;
9800 case 2048:
9801 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9802 LPFC_RQ_RING_SIZE_2048);
9803 break;
9804 case 4096:
9805 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9806 LPFC_RQ_RING_SIZE_4096);
9807 break;
9808 }
9809 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9810 cq->queue_id);
9811 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9812 drq->page_count);
9813 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9814 LPFC_DATA_BUF_SIZE);
9815 list_for_each_entry(dmabuf, &drq->page_list, list) {
9816 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9817 putPaddrLow(dmabuf->phys);
9818 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9819 putPaddrHigh(dmabuf->phys);
9820 }
9821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9822 /* The IOCTL status is embedded in the mailbox subheader. */
9823 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9826 if (shdr_status || shdr_add_status || rc) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9831 if (drq->queue_id == 0xFFFF) {
9832 status = -ENXIO;
9833 goto out;
9834 }
9835 drq->type = LPFC_DRQ;
9836 drq->subtype = subtype;
9837 drq->host_index = 0;
9838 drq->hba_index = 0;
9839
9840 /* link the header and data RQs onto the parent cq child list */
9841 list_add_tail(&hrq->list, &cq->child_list);
9842 list_add_tail(&drq->list, &cq->child_list);
9843
9844out:
9845 if (rc != MBX_TIMEOUT)
9846 mempool_free(mbox, phba->mbox_mem_pool);
9847 return status;
9848}
9849
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure used to issue the mailbox command.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If @eq is NULL it returns
 * -ENODEV; if the mailbox cannot be allocated it returns -ENOMEM. If the
 * queue destroy mailbox command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header. */
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	/* On timeout the completion handler still owns the mailbox. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
9905
9906/**
9907 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
9908 * @cq: The queue structure associated with the queue to destroy.
9909 *
9910 * This function destroys a queue, as detailed in @cq by sending an mailbox
9911 * command, specific to the type of queue, to the HBA.
9912 *
9913 * The @cq struct is used to get the queue ID of the queue to destroy.
9914 *
9915 * On success this function will return a zero. If the queue destroy mailbox
9916 * command fails this function will return ENXIO.
9917 **/
9918uint32_t
9919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
9920{
9921 LPFC_MBOXQ_t *mbox;
9922 int rc, length, status = 0;
9923 uint32_t shdr_status, shdr_add_status;
9924 union lpfc_sli4_cfg_shdr *shdr;
9925
9926 if (!cq)
9927 return -ENODEV;
9928 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
9929 if (!mbox)
9930 return -ENOMEM;
9931 length = (sizeof(struct lpfc_mbx_cq_destroy) -
9932 sizeof(struct lpfc_sli4_cfg_mhdr));
9933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9934 LPFC_MBOX_OPCODE_CQ_DESTROY,
9935 length, LPFC_SLI4_MBX_EMBED);
9936 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
9937 cq->queue_id);
9938 mbox->vport = cq->phba->pport;
9939 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9940 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
9941 /* The IOCTL status is embedded in the mailbox subheader. */
9942 shdr = (union lpfc_sli4_cfg_shdr *)
9943 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
9944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9946 if (shdr_status || shdr_add_status || rc) {
9947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9948 "2506 CQ_DESTROY mailbox failed with "
9949 "status x%x add_status x%x, mbx status x%x\n",
9950 shdr_status, shdr_add_status, rc);
9951 status = -ENXIO;
9952 }
9953 /* Remove cq from any list */
9954 list_del_init(&cq->list);
9955 if (rc != MBX_TIMEOUT)
9956 mempool_free(mbox, cq->phba->mbox_mem_pool);
9957 return status;
9958}
9959
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure used to issue the mailbox command.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header. */
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	/* On timeout the completion handler still owns the mailbox. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
10013
10014/**
10015 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10016 * @wq: The queue structure associated with the queue to destroy.
10017 *
10018 * This function destroys a queue, as detailed in @wq by sending an mailbox
10019 * command, specific to the type of queue, to the HBA.
10020 *
10021 * The @wq struct is used to get the queue ID of the queue to destroy.
10022 *
10023 * On success this function will return a zero. If the queue destroy mailbox
10024 * command fails this function will return ENXIO.
10025 **/
10026uint32_t
10027lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10028{
10029 LPFC_MBOXQ_t *mbox;
10030 int rc, length, status = 0;
10031 uint32_t shdr_status, shdr_add_status;
10032 union lpfc_sli4_cfg_shdr *shdr;
10033
10034 if (!wq)
10035 return -ENODEV;
10036 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10037 if (!mbox)
10038 return -ENOMEM;
10039 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10040 sizeof(struct lpfc_sli4_cfg_mhdr));
10041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10042 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10043 length, LPFC_SLI4_MBX_EMBED);
10044 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10045 wq->queue_id);
10046 mbox->vport = wq->phba->pport;
10047 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10048 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10049 shdr = (union lpfc_sli4_cfg_shdr *)
10050 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10053 if (shdr_status || shdr_add_status || rc) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055 "2508 WQ_DESTROY mailbox failed with "
10056 "status x%x add_status x%x, mbx status x%x\n",
10057 shdr_status, shdr_add_status, rc);
10058 status = -ENXIO;
10059 }
10060 /* Remove wq from any list */
10061 list_del_init(&wq->list);
10062 if (rc != MBX_TIMEOUT)
10063 mempool_free(mbox, wq->phba->mbox_mem_pool);
10064 return status;
10065}
10066
/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure used to issue the mailbox commands.
 * @hrq: The queue structure of the header receive queue to destroy.
 * @drq: The queue structure of the data receive queue to destroy.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending one RQ_DESTROY mailbox command per queue to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs to destroy.
 *
 * On success this function will return a zero. If either queue destroy
 * mailbox command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common mailbox header. */
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct mbox_header));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First destroy the header receive queue. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* Bail early; do not attempt the data RQ destroy. */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Reuse the same mailbox for the data receive queue destroy. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues from their parent cq child list. */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	/* On timeout the completion handler still owns the mailbox. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
10137
10138/**
10139 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10140 * @phba: The virtual port for which this call being executed.
10141 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10142 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10143 * @xritag: the xritag that ties this io to the SGL pages.
10144 *
10145 * This routine will post the sgl pages for the IO that has the xritag
10146 * that is in the iocbq structure. The xritag is assigned during iocbq
10147 * creation and persists for as long as the driver is loaded.
10148 * if the caller has fewer than 256 scatter gather segments to map then
10149 * pdma_phys_addr1 should be 0.
10150 * If the caller needs to map more than 256 scatter gather segment then
10151 * pdma_phys_addr1 should be a valid physical address.
10152 * physical address for SGLs must be 64 byte aligned.
10153 * If you are going to map 2 SGL's then the first one must have 256 entries
10154 * the second sgl can have between 1 and 256 entries.
10155 *
10156 * Return codes:
10157 * 0 - Success
10158 * -ENXIO, -ENOMEM - Failure
10159 **/
10160int
10161lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10162 dma_addr_t pdma_phys_addr0,
10163 dma_addr_t pdma_phys_addr1,
10164 uint16_t xritag)
10165{
10166 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10167 LPFC_MBOXQ_t *mbox;
10168 int rc;
10169 uint32_t shdr_status, shdr_add_status;
10170 union lpfc_sli4_cfg_shdr *shdr;
10171
10172 if (xritag == NO_XRI) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10174 "0364 Invalid param:\n");
10175 return -EINVAL;
10176 }
10177
10178 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10179 if (!mbox)
10180 return -ENOMEM;
10181
10182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10183 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10184 sizeof(struct lpfc_mbx_post_sgl_pages) -
10185 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10186
10187 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10188 &mbox->u.mqe.un.post_sgl_pages;
10189 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10190 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10191
10192 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10193 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10194 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10195 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10196
10197 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10198 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10199 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10200 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10201 if (!phba->sli4_hba.intr_enable)
10202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10203 else
10204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10205 /* The IOCTL status is embedded in the mailbox subheader. */
10206 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10209 if (rc != MBX_TIMEOUT)
10210 mempool_free(mbox, phba->mbox_mem_pool);
10211 if (shdr_status || shdr_add_status || rc) {
10212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10213 "2511 POST_SGL mailbox failed with "
10214 "status x%x add_status x%x, mbx status x%x\n",
10215 shdr_status, shdr_add_status, rc);
10216 rc = -ENXIO;
10217 }
10218 return 0;
10219}
10220/**
10221 * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA
10222 * @phba: The virtual port for which this call being executed.
10223 *
10224 * This routine will remove all of the sgl pages registered with the hba.
10225 *
10226 * Return codes:
10227 * 0 - Success
10228 * -ENXIO, -ENOMEM - Failure
10229 **/
10230int
10231lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10232{
10233 LPFC_MBOXQ_t *mbox;
10234 int rc;
10235 uint32_t shdr_status, shdr_add_status;
10236 union lpfc_sli4_cfg_shdr *shdr;
10237
10238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10239 if (!mbox)
10240 return -ENOMEM;
10241
10242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10243 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10244 LPFC_SLI4_MBX_EMBED);
10245 if (!phba->sli4_hba.intr_enable)
10246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10247 else
10248 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10249 /* The IOCTL status is embedded in the mailbox subheader. */
10250 shdr = (union lpfc_sli4_cfg_shdr *)
10251 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10254 if (rc != MBX_TIMEOUT)
10255 mempool_free(mbox, phba->mbox_mem_pool);
10256 if (shdr_status || shdr_add_status || rc) {
10257 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10258 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10259 "status x%x add_status x%x, mbx status x%x\n",
10260 shdr_status, shdr_add_status, rc);
10261 rc = -ENXIO;
10262 }
10263 return rc;
10264}
10265
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function allocates the next unused xritag for an iocb, under
 * hbalock protection. On success it returns the allocated xritag and
 * advances the next-xri cursor and the used counter. When the xri space
 * is exhausted it logs an error and returns 0xFFFF (i.e. (uint16_t)-1),
 * which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xritag;

	spin_lock_irq(&phba->hbalock);
	xritag = phba->sli4_hba.next_xri;
	/* A cursor of (uint16_t)-1 marks the pool as exhausted; otherwise
	 * the tag must stay below xri_base + max_xri.
	 */
	if ((xritag != (uint16_t) -1) && xritag <
		(phba->sli4_hba.max_cfg_param.max_xri
			+ phba->sli4_hba.max_cfg_param.xri_base)) {
		phba->sli4_hba.next_xri++;
		phba->sli4_hba.max_cfg_param.xri_used++;
		spin_unlock_irq(&phba->hbalock);
		return xritag;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2004 Failed to allocate XRI.last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			phba->sli4_hba.next_xri,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	/* -1 truncates to the 0xFFFF "no XRI" sentinel in the uint16_t
	 * return type.
	 */
	return -1;
}
10301
/**
 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 *
 * Returns zero on success, -ENOMEM if the request does not fit in a page
 * or allocations fail, and -ENXIO if the mailbox command fails.
 **/
int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int els_xri_cnt, rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The number of sgls to be posted */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* One page pair per ELS xri plus the config header and word0. */
	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2560 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	if (unlikely(!mbox->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2525 Failed to get the non-embedded SGE "
				"virtual address\n");
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		/* ELS sgls use a single page; the second page is zeroed. */
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	/* xricnt appears to be encoded as (count - 1) here — NOTE(review):
	 * confirm against the SLI4 POST_SGL_PAGES spec; the scsi variant
	 * passes the raw count.
	 */
	pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The command status is embedded in the config subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the completion handler still owns the mailbox. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
10414
10415/**
10416 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10417 * @phba: pointer to lpfc hba data structure.
10418 * @sblist: pointer to scsi buffer list.
10419 * @count: number of scsi buffers on the list.
10420 *
10421 * This routine is invoked to post a block of @count scsi sgl pages from a
10422 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10423 * No Lock is held.
10424 *
10425 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
			      int cnt)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/*
	 * Calculate the requested length of the dma memory: one page-pair
	 * entry per buffer plus the SLI4 config header and one extra word
	 * (the xri/count word written via bf_set below).
	 */
	reqlen = cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	/* The whole non-embedded request must fit in one page */
	if (reqlen > PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	if (unlikely(!mbox->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2565 Failed to get the non-embedded SGE "
				"virtual address\n");
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry (firmware expects little-endian) */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		/*
		 * A second SGL page is only needed when the configured
		 * buffer size spills past one page; otherwise post zero.
		 */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	/* Poll when interrupts are disabled, otherwise wait for completion */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* Command status is reported in the SLI4 config subheader */
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/*
	 * On MBX_TIMEOUT the mailbox is not freed here — presumably it is
	 * still owned by the firmware/completion path; TODO confirm.
	 */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
10533
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/* Name tables indexed by the raw R_CTL / TYPE byte, for logging */
	char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;

	/* First gate: only the R_CTL values below are accepted */
	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
	case FC_RCTL_BA_RMC: 	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		/*
		 * Re-check the encapsulated frame header that follows the
		 * VFT header. NOTE(review): the advance here is
		 * sizeof(struct fc_frame_header), which assumes the VFT
		 * header occupies the same space as a frame header — confirm
		 * against the struct fc_vft_header definition.
		 */
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	/* Second gate: only the FC-4 TYPE values below are accepted */
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
10610
10611/**
10612 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10613 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10614 *
10615 * This function processes the FC header to retrieve the VFI from the VF
10616 * header, if one exists. This function will return the VFI if one exists
10617 * or 0 if no VSAN Header exists.
10618 **/
10619static uint32_t
10620lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10621{
10622 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10623
10624 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10625 return 0;
10626 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10627}
10628
10629/**
10630 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10631 * @phba: Pointer to the HBA structure to search for the vport on
10632 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10633 * @fcfi: The FC Fabric ID that the frame came from
10634 *
10635 * This function searches the @phba for a vport that matches the content of the
10636 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10637 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10638 * returns the matching vport pointer or NULL if unable to match frame to a
10639 * vport.
10640 **/
10641static struct lpfc_vport *
10642lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10643 uint16_t fcfi)
10644{
10645 struct lpfc_vport **vports;
10646 struct lpfc_vport *vport = NULL;
10647 int i;
10648 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10649 fc_hdr->fh_d_id[1] << 8 |
10650 fc_hdr->fh_d_id[2]);
10651
10652 vports = lpfc_create_vport_work_array(phba);
10653 if (vports != NULL)
10654 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10655 if (phba->fcf.fcfi == fcfi &&
10656 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10657 vports[i]->fc_myDID == did) {
10658 vport = vports[i];
10659 break;
10660 }
10661 }
10662 lpfc_destroy_vport_work_array(phba, vports);
10663 return vport;
10664}
10665
10666/**
10667 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10668 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10669 *
10670 * This function searches through the existing incomplete sequences that have
10671 * been sent to this @vport. If the frame matches one of the incomplete
10672 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10673 * make up that sequence. If no sequence is found that matches this frame then
10674 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
10675 * This function returns a pointer to the first dmabuf in the sequence list that
10676 * the frame was linked to.
10677 **/
10678static struct hbq_dmabuf *
10679lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10680{
10681 struct fc_frame_header *new_hdr;
10682 struct fc_frame_header *temp_hdr;
10683 struct lpfc_dmabuf *d_buf;
10684 struct lpfc_dmabuf *h_buf;
10685 struct hbq_dmabuf *seq_dmabuf = NULL;
10686 struct hbq_dmabuf *temp_dmabuf = NULL;
10687
10688 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10689 /* Use the hdr_buf to find the sequence that this frame belongs to */
10690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10695 continue;
10696 /* found a pending sequence that matches this frame */
10697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10698 break;
10699 }
10700 if (!seq_dmabuf) {
10701 /*
10702 * This indicates first frame received for this sequence.
10703 * Queue the buffer on the vport's rcv_buffer_list.
10704 */
10705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10706 return dmabuf;
10707 }
10708 temp_hdr = seq_dmabuf->hbuf.virt;
10709 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10710 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10711 return dmabuf;
10712 }
10713 /* find the correct place in the sequence to insert this frame */
10714 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10715 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10716 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10717 /*
10718 * If the frame's sequence count is greater than the frame on
10719 * the list then insert the frame right after this frame
10720 */
10721 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10722 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10723 return seq_dmabuf;
10724 }
10725 }
10726 return NULL;
10727}
10728
10729/**
10730 * lpfc_seq_complete - Indicates if a sequence is complete
10731 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10732 *
10733 * This function checks the sequence, starting with the frame described by
10734 * @dmabuf, to see if all the frames associated with this sequence are present.
10735 * the frames associated with this sequence are linked to the @dmabuf using the
10736 * dbuf list. This function looks for two major things. 1) That the first frame
10737 * has a sequence count of zero. 2) There is a frame with last frame of sequence
10738 * set. 3) That there are no holes in the sequence count. The function will
10739 * return 1 when the sequence is complete, otherwise it will return 0.
10740 **/
10741static int
10742lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10743{
10744 struct fc_frame_header *hdr;
10745 struct lpfc_dmabuf *d_buf;
10746 struct hbq_dmabuf *seq_dmabuf;
10747 uint32_t fctl;
10748 int seq_count = 0;
10749
10750 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10751 /* make sure first fame of sequence has a sequence count of zero */
10752 if (hdr->fh_seq_cnt != seq_count)
10753 return 0;
10754 fctl = (hdr->fh_f_ctl[0] << 16 |
10755 hdr->fh_f_ctl[1] << 8 |
10756 hdr->fh_f_ctl[2]);
10757 /* If last frame of sequence we can return success. */
10758 if (fctl & FC_FC_END_SEQ)
10759 return 1;
10760 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10761 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10762 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10763 /* If there is a hole in the sequence count then fail. */
10764 if (++seq_count != hdr->fh_seq_cnt)
10765 return 0;
10766 fctl = (hdr->fh_f_ctl[0] << 16 |
10767 hdr->fh_f_ctl[1] << 8 |
10768 hdr->fh_f_ctl[2]);
10769 /* If last frame of sequence we can return success. */
10770 if (fctl & FC_FC_END_SEQ)
10771 return 1;
10772 }
10773 return 0;
10774}
10775
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to the first dmabuf of the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throw out the received frames that were not
 * able to be described and return a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	/* get the Remote Port's SID (24-bit source ID from the header) */
	sid = (fc_hdr->fh_s_id[0] << 16 |
	       fc_hdr->fh_s_id[1] << 8 |
	       fc_hdr->fh_s_id[2]);
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB as a received-sequence IOCB */
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->vpi + vport->phba->vpi_base;
		/* put the first buffer into the first IOCBq */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			/* No iocbq available: drop this frame's buffer */
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* Current iocbq still has a free second-buffer slot */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		} else {
			/* Current iocbq is full: allocate another one */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				/*
				 * Out of iocbqs: flag the whole sequence as
				 * resource-errored and drop this buffer.
				 */
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			iocbq->iocb.un.rcvels.remoteID = sid;
			/* Chain the new iocbq onto the sequence's iocb list */
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
10859
10860/**
10861 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10862 * @phba: Pointer to HBA context object.
10863 *
10864 * This function is called with no lock held. This function processes all
10865 * the received buffers and gives it to upper layers when a received buffer
10866 * indicates that it is the final frame in the sequence. The interrupt
10867 * service routine processes received buffers at interrupt contexts and adds
10868 * received dma buffers to the rb_pend_list queue and signals the worker thread.
10869 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10870 * appropriate receive function when the final frame in a sequence is received.
10871 **/
10872int
10873lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
10874{
10875 LIST_HEAD(cmplq);
10876 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
10877 struct fc_frame_header *fc_hdr;
10878 struct lpfc_vport *vport;
10879 uint32_t fcfi;
10880 struct lpfc_iocbq *iocbq;
10881
10882 /* Clear hba flag and get all received buffers into the cmplq */
10883 spin_lock_irq(&phba->hbalock);
10884 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
10885 list_splice_init(&phba->rb_pend_list, &cmplq);
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 /* Process each received buffer */
10889 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
10890 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10891 /* check to see if this a valid type of frame */
10892 if (lpfc_fc_frame_check(phba, fc_hdr)) {
10893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10894 continue;
10895 }
10896 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
10897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
10898 if (!vport) {
10899 /* throw out the frame */
10900 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10901 continue;
10902 }
10903 /* Link this frame */
10904 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
10905 if (!seq_dmabuf) {
10906 /* unable to add frame to vport - throw it out */
10907 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10908 continue;
10909 }
10910 /* If not last frame in sequence continue processing frames. */
10911 if (!lpfc_seq_complete(seq_dmabuf)) {
10912 /*
10913 * When saving off frames post a new one and mark this
10914 * frame to be freed when it is finished.
10915 **/
10916 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
10917 dmabuf->tag = -1;
10918 continue;
10919 }
10920 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10921 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
10922 if (!lpfc_complete_unsol_iocb(phba,
10923 &phba->sli.ring[LPFC_ELS_RING],
10924 iocbq, fc_hdr->fh_r_ctl,
10925 fc_hdr->fh_type))
10926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10927 "2540 Ring %d handler: unexpected Rctl "
10928 "x%x Type x%x received\n",
10929 LPFC_ELS_RING,
10930 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
10931 };
10932 return 0;
10933}
10934
10935/**
10936 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
10937 * @phba: pointer to lpfc hba data structure.
10938 *
10939 * This routine is invoked to post rpi header templates to the
10940 * HBA consistent with the SLI-4 interface spec. This routine
10941 * posts a PAGE_SIZE memory region to the port to hold up to
10942 * PAGE_SIZE modulo 64 rpi context headers.
10943 *
10944 * This routine does not require any locks. It's usage is expected
10945 * to be driver load or reset recovery when the driver is
10946 * sequential.
10947 *
10948 * Return codes
10949 * 0 - sucessful
10950 * EIO - The mailbox failed to complete successfully.
10951 * When this error occurs, the driver is not guaranteed
10952 * to have any rpi regions posted to the device and
10953 * must either attempt to repost the regions or take a
10954 * fatal error.
10955 **/
10956int
10957lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
10958{
10959 struct lpfc_rpi_hdr *rpi_page;
10960 uint32_t rc = 0;
10961
10962 /* Post all rpi memory regions to the port. */
10963 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
10964 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
10965 if (rc != MBX_SUCCESS) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10967 "2008 Error %d posting all rpi "
10968 "headers\n", rc);
10969 rc = -EIO;
10970 break;
10971 }
10972 }
10973
10974 return rc;
10975}
10976
10977/**
10978 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
10979 * @phba: pointer to lpfc hba data structure.
10980 * @rpi_page: pointer to the rpi memory region.
10981 *
10982 * This routine is invoked to post a single rpi header to the
10983 * HBA consistent with the SLI-4 interface spec. This memory region
10984 * maps up to 64 rpi context regions.
10985 *
10986 * Return codes
10987 * 0 - sucessful
10988 * ENOMEM - No available memory
10989 * EIO - The mailbox failed to complete successfully.
10990 **/
10991int
10992lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
10993{
10994 LPFC_MBOXQ_t *mboxq;
10995 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
10996 uint32_t rc = 0;
10997 uint32_t mbox_tmo;
10998 uint32_t shdr_status, shdr_add_status;
10999 union lpfc_sli4_cfg_shdr *shdr;
11000
11001 /* The port is notified of the header region via a mailbox command. */
11002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11003 if (!mboxq) {
11004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11005 "2001 Unable to allocate memory for issuing "
11006 "SLI_CONFIG_SPECIAL mailbox command\n");
11007 return -ENOMEM;
11008 }
11009
11010 /* Post all rpi memory regions to the port. */
11011 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11012 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11013 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11014 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11015 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11016 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11017 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11018 hdr_tmpl, rpi_page->page_count);
11019 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11020 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable)
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11030 if (rc != MBX_TIMEOUT)
11031 mempool_free(mboxq, phba->mbox_mem_pool);
11032 if (shdr_status || shdr_add_status || rc) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11034 "2514 POST_RPI_HDR mailbox failed with "
11035 "status x%x add_status x%x, mbx status x%x\n",
11036 shdr_status, shdr_add_status, rc);
11037 rc = -ENXIO;
11038 }
11039 return rc;
11040}
11041
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates the next free rpi from the driver's bitmask,
 * and grows the rpi header region by one page when the pool of
 * remaining rpis drops below the low-water mark.
 *
 * Returns
 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based.  Start
	 * the search at the rpi_base as reported by the port.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		/* Claim the rpi and account for it under the lock */
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * on available rpis max has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		/* Grow the header region and post it (no lock held here) */
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
11113
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi value to release back to the pool.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/* Clear the allocation bit and adjust the counters under hbalock */
	spin_lock_irq(&phba->hbalock);
	clear_bit(rpi, phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_count--;
	phba->sli4_hba.max_cfg_param.rpi_used--;
	spin_unlock_irq(&phba->hbalock);
}
11130
11131/**
11132 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11133 * @phba: pointer to lpfc hba data structure.
11134 *
11135 * This routine is invoked to remove the memory region that
11136 * provided rpi via a bitmask.
11137 **/
11138void
11139lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11140{
11141 kfree(phba->sli4_hba.rpi_bmask);
11142}
11143
/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command for a node
 * @ndlp: pointer to the remote node whose rpi is to be resumed.
 *
 * This routine builds a RESUME_RPI mailbox command for the rpi
 * associated with @ndlp and issues it to the port without waiting
 * for completion.
 *
 * Return codes
 * 	0 - mailbox command successfully issued
 * 	-ENOMEM - no mailbox memory available
 * 	-EIO - the mailbox command could not be issued
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* Allocate the mailbox used to carry the RESUME_RPI command */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Build the command and issue it without waiting for completion */
	lpfc_resume_rpi(mboxq, ndlp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
11176
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @phba: pointer to lpfc hba data structure.
 * @vpi: vpi value to activate with the port.
 *
 * This routine is invoked to activate a vpi with the
 * port when the host intends to use vports with a
 * nonzero vpi.
 *
 * Returns:
 * 	0 success
 *	-EINVAL - @vpi is zero
 *	-ENOMEM - no mailbox memory available
 *	-EIO - the INIT_VPI mailbox command failed
 **/
int
lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	uint32_t mbox_tmo;

	/* vpi 0 is the physical port and needs no activation */
	if (vpi == 0)
		return -EINVAL;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(mboxq, vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	/* On timeout the mailbox is not freed here — see the wait path */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2022 INIT VPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		rc = -EIO;
	}
	return rc;
}
11216
11217/**
11218 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11219 * @phba: pointer to lpfc hba data structure.
11220 * @mboxq: Pointer to mailbox object.
11221 *
11222 * This routine is invoked to manually add a single FCF record. The caller
11223 * must pass a completely initialized FCF_Record. This routine takes
11224 * care of the nonembedded mailbox operations.
11225 **/
11226static void
11227lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11228{
11229 void *virt_addr;
11230 union lpfc_sli4_cfg_shdr *shdr;
11231 uint32_t shdr_status, shdr_add_status;
11232
11233 virt_addr = mboxq->sge_array->addr[0];
11234 /* The IOCTL status is embedded in the mailbox subheader. */
11235 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11238
11239 if ((shdr_status || shdr_add_status) &&
11240 (shdr_status != STATUS_FCF_IN_USE))
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "2558 ADD_FCF_RECORD mailbox failed with "
11243 "status x%x add_status x%x\n",
11244 shdr_status, shdr_add_status);
11245
11246 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11247}
11248
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 *
 * Return codes
 *	0 - mailbox command successfully issued
 *	-ENOMEM - mailbox or DMA allocation failed
 *	-EIO - the mailbox command could not be issued
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	/* Record body plus the SLI4 config header plus the fcf_index word */
	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2526 Failed to get the non-embedded SGE "
				"virtual address\n");
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	/* Completion handler frees the mailbox and its SGE pages */
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
11336
11337/**
11338 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11339 * @phba: pointer to lpfc hba data structure.
11340 * @fcf_record: pointer to the fcf record to write the default data.
11341 * @fcf_index: FCF table entry index.
11342 *
11343 * This routine is invoked to build the driver's default FCF record. The
11344 * values used are hardcoded. This routine handles memory initialization.
11345 *
11346 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	/* Start from a zeroed record so every field not set below is 0. */
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	/*
	 * The first three MAC bytes come from the HBA's FC_MAP; the last
	 * three use the driver's hardcoded LPFC_FCOE_FCF_MAC3..5 defaults.
	 */
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	/* The FC map mirrors the same fc_map bytes used for the MAC. */
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	/* Advertise both fabric- and server-provided MAC address modes. */
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map: one bit per VLAN id, 8 ids per byte. */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
11375
/**
 * lpfc_sli4_read_fcf_record - Read an FCF record from the port's FCF table.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record from the device
 * starting at the given @fcf_index.
 **/
11384int
11385lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11386{
11387 int rc = 0, error;
11388 LPFC_MBOXQ_t *mboxq;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 uint8_t *bytep;
11392 struct lpfc_mbx_sge sge;
11393 uint32_t alloc_len, req_len;
11394 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11395
11396 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11397 if (!mboxq) {
11398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11399 "2000 Failed to allocate mbox for "
11400 "READ_FCF cmd\n");
11401 return -ENOMEM;
11402 }
11403
11404 req_len = sizeof(struct fcf_record) +
11405 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11406
11407 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11408 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11409 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11410 LPFC_SLI4_MBX_NEMBED);
11411
11412 if (alloc_len < req_len) {
11413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11414 "0291 Allocated DMA memory size (x%x) is "
11415 "less than the requested DMA memory "
11416 "size (x%x)\n", alloc_len, req_len);
11417 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11418 return -ENOMEM;
11419 }
11420
11421 /* Get the first SGE entry from the non-embedded DMA memory. This
11422 * routine only uses a single SGE.
11423 */
11424 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11425 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11426 if (unlikely(!mboxq->sge_array)) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11428 "2527 Failed to get the non-embedded SGE "
11429 "virtual address\n");
11430 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11431 return -ENOMEM;
11432 }
11433 virt_addr = mboxq->sge_array->addr[0];
11434 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11435
11436 /* Set up command fields */
11437 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11438 /* Perform necessary endian conversion */
11439 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11440 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11441 mboxq->vport = phba->pport;
11442 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11444 if (rc == MBX_NOT_FINISHED) {
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 error = -EIO;
11447 } else
11448 error = 0;
11449 return error;
11450}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..7d37eb7459bf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 75 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 76 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 77 struct lpfc_iocbq *);
68 78 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 79};
70 80
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 81#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 91typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 92 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 93 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 94 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 95 MAILBOX_t mb; /* Mailbox cmd */
96 struct lpfc_mqe mqe;
97 } u;
98 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 99 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 100 void *context2; /* caller context information */
88 101
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 102 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 103 uint8_t mbox_flag;
91 104 struct lpfc_mcqe mcqe;
105 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 106} LPFC_MBOXQ_t;
93 107
94#define MBX_POLL 1 /* poll mailbox till command done, then 108#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
230 244
231 /* Additional sli_flags */ 245 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 246#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 247#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 248#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 249#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 250#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
251#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 252
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 253 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 254 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
261 276
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 277#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 278 command */
279#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
280 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 281#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 282 * or erase cmds. This is especially
266 * long because of the potential of 283 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000000..5196b46608d7
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when nonFIP mode is configured and there is no other default
44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC is specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
 143#define FCF_IN_USE	0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e3078..6b8a148f0a55 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.2"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..a6313ee84ac5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 740 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 741 struct lpfc_vport **vports;
712 int index = 0; 742 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 743 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 744 GFP_KERNEL);
715 if (vports == NULL) 745 if (vports == NULL)
716 return NULL; 746 return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 764 int i;
735 if (vports == NULL) 765 if (vports == NULL)
736 return; 766 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 767 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 768 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 769 kfree(vports);
740} 770}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 36b1d1052ba1..286c185fa9e4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
61#include <scsi/scsi_tcq.h> 61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h> 62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h> 63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
64 65
65#include "mpt2sas_debug.h" 66#include "mpt2sas_debug.h"
66 67
@@ -68,10 +69,10 @@
68#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "01.100.02.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
72#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 01
73#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 02 75#define MPT2SAS_BUILD_VERSION 03
75#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
76 77
77/* 78/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf0..14e473d1fa7b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
473} 473}
474 474
475/** 475/**
476 * _ctl_do_task_abort - assign an active smid to the abort_task 476 * _ctl_set_task_mid - assign an active smid to tm request
477 * @ioc: per adapter object 477 * @ioc: per adapter object
478 * @karg - (struct mpt2_ioctl_command) 478 * @karg - (struct mpt2_ioctl_command)
479 * @tm_request - pointer to mf from user space 479 * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
482 * during failure, the reply frame is filled. 482 * during failure, the reply frame is filled.
483 */ 483 */
484static int 484static int
485_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, 485_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
486 Mpi2SCSITaskManagementRequest_t *tm_request) 486 Mpi2SCSITaskManagementRequest_t *tm_request)
487{ 487{
488 u8 found = 0; 488 u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
494 Mpi2SCSITaskManagementReply_t *tm_reply; 494 Mpi2SCSITaskManagementReply_t *tm_reply;
495 u32 sz; 495 u32 sz;
496 u32 lun; 496 u32 lun;
497 char *desc = NULL;
498
499 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
500 desc = "abort_task";
501 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
502 desc = "query_task";
503 else
504 return 0;
497 505
498 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 506 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
499 507
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
517 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 525 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
518 526
519 if (!found) { 527 if (!found) {
520 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 528 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
521 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 529 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
522 tm_request->DevHandle, lun)); 530 desc, tm_request->DevHandle, lun));
523 tm_reply = ioc->ctl_cmds.reply; 531 tm_reply = ioc->ctl_cmds.reply;
524 tm_reply->DevHandle = tm_request->DevHandle; 532 tm_reply->DevHandle = tm_request->DevHandle;
525 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 533 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 534 tm_reply->TaskType = tm_request->TaskType;
527 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 535 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
528 tm_reply->VP_ID = tm_request->VP_ID; 536 tm_reply->VP_ID = tm_request->VP_ID;
529 tm_reply->VF_ID = tm_request->VF_ID; 537 tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
535 return 1; 543 return 1;
536 } 544 }
537 545
538 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 546 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
539 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, 547 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
540 tm_request->DevHandle, lun, tm_request->TaskMID)); 548 desc, tm_request->DevHandle, lun, tm_request->TaskMID));
541 return 0; 549 return 0;
542} 550}
543 551
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 747 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
740 748
741 if (tm_request->TaskType == 749 if (tm_request->TaskType ==
742 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 750 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
743 if (_ctl_do_task_abort(ioc, &karg, tm_request)) { 751 tm_request->TaskType ==
752 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
753 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
744 mpt2sas_base_free_smid(ioc, smid); 754 mpt2sas_base_free_smid(ioc, smid);
745 goto out; 755 goto out;
746 } 756 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e7..2a01a5f2a84d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
197MODULE_DEVICE_TABLE(pci, scsih_pci_table); 197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198 198
199/** 199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level. 200 * _scsih_set_debug_level - global setting of ioc->logging_level.
201 * 201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h. 202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */ 203 */
204static int 204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp) 205_scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{ 206{
207 int ret = param_set_int(val, kp); 207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc; 208 struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
215 ioc->logging_level = logging_level; 215 ioc->logging_level = logging_level;
216 return 0; 216 return 0;
217} 217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int, 218module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
219 &logging_level, 0644); 219 &logging_level, 0644);
220 220
221/** 221/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
884} 884}
885 885
886/** 886/**
887 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
888 * @ioc: per adapter object
889 * @id: target id
890 * @lun: lun number
891 * @channel: channel
892 * Context: This function will acquire ioc->scsi_lookup_lock.
893 *
894 * This will search for a matching channel:id:lun in the scsi_lookup array,
895 * returning 1 if found.
896 */
897static u8
898_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
899 unsigned int lun, int channel)
900{
901 u8 found;
902 unsigned long flags;
903 int i;
904
905 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
906 found = 0;
907 for (i = 0 ; i < ioc->request_depth; i++) {
908 if (ioc->scsi_lookup[i].scmd &&
909 (ioc->scsi_lookup[i].scmd->device->id == id &&
910 ioc->scsi_lookup[i].scmd->device->channel == channel &&
911 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
912 found = 1;
913 goto out;
914 }
915 }
916 out:
917 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
918 return found;
919}
920
921/**
887 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 922 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
888 * @ioc: per adapter object 923 * @ioc: per adapter object
889 * @smid: system request message index 924 * @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1047} 1082}
1048 1083
1049/** 1084/**
1050 * scsih_change_queue_depth - setting device queue depth 1085 * _scsih_change_queue_depth - setting device queue depth
1051 * @sdev: scsi device struct 1086 * @sdev: scsi device struct
1052 * @qdepth: requested queue depth 1087 * @qdepth: requested queue depth
1053 * 1088 *
1054 * Returns queue depth. 1089 * Returns queue depth.
1055 */ 1090 */
1056static int 1091static int
1057scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1092_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1058{ 1093{
1059 struct Scsi_Host *shost = sdev->host; 1094 struct Scsi_Host *shost = sdev->host;
1060 int max_depth; 1095 int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1079} 1114}
1080 1115
1081/** 1116/**
1082 * scsih_change_queue_depth - changing device queue tag type 1117 * _scsih_change_queue_depth - changing device queue tag type
1083 * @sdev: scsi device struct 1118 * @sdev: scsi device struct
1084 * @tag_type: requested tag type 1119 * @tag_type: requested tag type
1085 * 1120 *
1086 * Returns queue tag type. 1121 * Returns queue tag type.
1087 */ 1122 */
1088static int 1123static int
1089scsih_change_queue_type(struct scsi_device *sdev, int tag_type) 1124_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1090{ 1125{
1091 if (sdev->tagged_supported) { 1126 if (sdev->tagged_supported) {
1092 scsi_set_tag_type(sdev, tag_type); 1127 scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1101} 1136}
1102 1137
1103/** 1138/**
1104 * scsih_target_alloc - target add routine 1139 * _scsih_target_alloc - target add routine
1105 * @starget: scsi target struct 1140 * @starget: scsi target struct
1106 * 1141 *
1107 * Returns 0 if ok. Any other return is assumed to be an error and 1142 * Returns 0 if ok. Any other return is assumed to be an error and
1108 * the device is ignored. 1143 * the device is ignored.
1109 */ 1144 */
1110static int 1145static int
1111scsih_target_alloc(struct scsi_target *starget) 1146_scsih_target_alloc(struct scsi_target *starget)
1112{ 1147{
1113 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1148 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1114 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1149 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
1163} 1198}
1164 1199
1165/** 1200/**
1166 * scsih_target_destroy - target destroy routine 1201 * _scsih_target_destroy - target destroy routine
1167 * @starget: scsi target struct 1202 * @starget: scsi target struct
1168 * 1203 *
1169 * Returns nothing. 1204 * Returns nothing.
1170 */ 1205 */
1171static void 1206static void
1172scsih_target_destroy(struct scsi_target *starget) 1207_scsih_target_destroy(struct scsi_target *starget)
1173{ 1208{
1174 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1175 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1210 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
1212} 1247}
1213 1248
1214/** 1249/**
1215 * scsih_slave_alloc - device add routine 1250 * _scsih_slave_alloc - device add routine
1216 * @sdev: scsi device struct 1251 * @sdev: scsi device struct
1217 * 1252 *
1218 * Returns 0 if ok. Any other return is assumed to be an error and 1253 * Returns 0 if ok. Any other return is assumed to be an error and
1219 * the device is ignored. 1254 * the device is ignored.
1220 */ 1255 */
1221static int 1256static int
1222scsih_slave_alloc(struct scsi_device *sdev) 1257_scsih_slave_alloc(struct scsi_device *sdev)
1223{ 1258{
1224 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1225 struct MPT2SAS_ADAPTER *ioc; 1260 struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
1273} 1308}
1274 1309
1275/** 1310/**
1276 * scsih_slave_destroy - device destroy routine 1311 * _scsih_slave_destroy - device destroy routine
1277 * @sdev: scsi device struct 1312 * @sdev: scsi device struct
1278 * 1313 *
1279 * Returns nothing. 1314 * Returns nothing.
1280 */ 1315 */
1281static void 1316static void
1282scsih_slave_destroy(struct scsi_device *sdev) 1317_scsih_slave_destroy(struct scsi_device *sdev)
1283{ 1318{
1284 struct MPT2SAS_TARGET *sas_target_priv_data; 1319 struct MPT2SAS_TARGET *sas_target_priv_data;
1285 struct scsi_target *starget; 1320 struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
1295} 1330}
1296 1331
1297/** 1332/**
1298 * scsih_display_sata_capabilities - sata capabilities 1333 * _scsih_display_sata_capabilities - sata capabilities
1299 * @ioc: per adapter object 1334 * @ioc: per adapter object
1300 * @sas_device: the sas_device object 1335 * @sas_device: the sas_device object
1301 * @sdev: scsi device struct 1336 * @sdev: scsi device struct
1302 */ 1337 */
1303static void 1338static void
1304scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1339_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1305 struct _sas_device *sas_device, struct scsi_device *sdev) 1340 struct _sas_device *sas_device, struct scsi_device *sdev)
1306{ 1341{
1307 Mpi2ConfigReply_t mpi_reply; 1342 Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1401} 1436}
1402 1437
1403/** 1438/**
1404 * scsih_slave_configure - device configure routine. 1439 * _scsih_slave_configure - device configure routine.
1405 * @sdev: scsi device struct 1440 * @sdev: scsi device struct
1406 * 1441 *
1407 * Returns 0 if ok. Any other return is assumed to be an error and 1442 * Returns 0 if ok. Any other return is assumed to be an error and
1408 * the device is ignored. 1443 * the device is ignored.
1409 */ 1444 */
1410static int 1445static int
1411scsih_slave_configure(struct scsi_device *sdev) 1446_scsih_slave_configure(struct scsi_device *sdev)
1412{ 1447{
1413 struct Scsi_Host *shost = sdev->host; 1448 struct Scsi_Host *shost = sdev->host;
1414 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1449 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1489 r_level, raid_device->handle, 1524 r_level, raid_device->handle,
1490 (unsigned long long)raid_device->wwid, 1525 (unsigned long long)raid_device->wwid,
1491 raid_device->num_pds, ds); 1526 raid_device->num_pds, ds);
1492 scsih_change_queue_depth(sdev, qdepth); 1527 _scsih_change_queue_depth(sdev, qdepth);
1493 return 0; 1528 return 0;
1494 } 1529 }
1495 1530
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
1532 sas_device->slot); 1567 sas_device->slot);
1533 1568
1534 if (!ssp_target) 1569 if (!ssp_target)
1535 scsih_display_sata_capabilities(ioc, sas_device, sdev); 1570 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1536 } 1571 }
1537 1572
1538 scsih_change_queue_depth(sdev, qdepth); 1573 _scsih_change_queue_depth(sdev, qdepth);
1539 1574
1540 if (ssp_target) 1575 if (ssp_target)
1541 sas_read_port_mode_page(sdev); 1576 sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1543} 1578}
1544 1579
1545/** 1580/**
1546 * scsih_bios_param - fetch head, sector, cylinder info for a disk 1581 * _scsih_bios_param - fetch head, sector, cylinder info for a disk
1547 * @sdev: scsi device struct 1582 * @sdev: scsi device struct
1548 * @bdev: pointer to block device context 1583 * @bdev: pointer to block device context
1549 * @capacity: device size (in 512 byte sectors) 1584 * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1555 * Return nothing. 1590 * Return nothing.
1556 */ 1591 */
1557static int 1592static int
1558scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 1593_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1559 sector_t capacity, int params[]) 1594 sector_t capacity, int params[])
1560{ 1595{
1561 int heads; 1596 int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1636} 1671}
1637 1672
1638/** 1673/**
1639 * scsih_tm_done - tm completion routine 1674 * _scsih_tm_done - tm completion routine
1640 * @ioc: per adapter object 1675 * @ioc: per adapter object
1641 * @smid: system request message index 1676 * @smid: system request message index
1642 * @VF_ID: virtual function id 1677 * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1648 * Return nothing. 1683 * Return nothing.
1649 */ 1684 */
1650static void 1685static void
1651scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1686_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1652{ 1687{
1653 MPI2DefaultReply_t *mpi_reply; 1688 MPI2DefaultReply_t *mpi_reply;
1654 1689
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1823} 1858}
1824 1859
1825/** 1860/**
1826 * scsih_abort - eh threads main abort routine 1861 * _scsih_abort - eh threads main abort routine
1827 * @sdev: scsi device struct 1862 * @sdev: scsi device struct
1828 * 1863 *
1829 * Returns SUCCESS if command aborted else FAILED 1864 * Returns SUCCESS if command aborted else FAILED
1830 */ 1865 */
1831static int 1866static int
1832scsih_abort(struct scsi_cmnd *scmd) 1867_scsih_abort(struct scsi_cmnd *scmd)
1833{ 1868{
1834 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 1869 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1835 struct MPT2SAS_DEVICE *sas_device_priv_data; 1870 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
1889 return r; 1924 return r;
1890} 1925}
1891 1926
1927/**
1928 * _scsih_dev_reset - eh threads main device reset routine
1929 * @sdev: scsi device struct
1930 *
1931 * Returns SUCCESS if command aborted else FAILED
1932 */
1933static int
1934_scsih_dev_reset(struct scsi_cmnd *scmd)
1935{
1936 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1937 struct MPT2SAS_DEVICE *sas_device_priv_data;
1938 struct _sas_device *sas_device;
1939 unsigned long flags;
1940 u16 handle;
1941 int r;
1942
1943 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
1944 ioc->name, scmd);
1945 scsi_print_command(scmd);
1946
1947 sas_device_priv_data = scmd->device->hostdata;
1948 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1949 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
1950 ioc->name, scmd);
1951 scmd->result = DID_NO_CONNECT << 16;
1952 scmd->scsi_done(scmd);
1953 r = SUCCESS;
1954 goto out;
1955 }
1956
1957 /* for hidden raid components obtain the volume_handle */
1958 handle = 0;
1959 if (sas_device_priv_data->sas_target->flags &
1960 MPT_TARGET_FLAGS_RAID_COMPONENT) {
1961 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1962 sas_device = _scsih_sas_device_find_by_handle(ioc,
1963 sas_device_priv_data->sas_target->handle);
1964 if (sas_device)
1965 handle = sas_device->volume_handle;
1966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1967 } else
1968 handle = sas_device_priv_data->sas_target->handle;
1969
1970 if (!handle) {
1971 scmd->result = DID_RESET << 16;
1972 r = FAILED;
1973 goto out;
1974 }
1975
1976 mutex_lock(&ioc->tm_cmds.mutex);
1977 mpt2sas_scsih_issue_tm(ioc, handle, 0,
1978 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
1979 30);
1980
1981 /*
1982 * sanity check see whether all commands to this device been
1983 * completed
1984 */
1985 if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
1986 scmd->device->lun, scmd->device->channel))
1987 r = FAILED;
1988 else
1989 r = SUCCESS;
1990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
1991 mutex_unlock(&ioc->tm_cmds.mutex);
1992
1993 out:
1994 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
1995 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1996 return r;
1997}
1892 1998
1893/** 1999/**
1894 * scsih_dev_reset - eh threads main device reset routine 2000 * _scsih_target_reset - eh threads main target reset routine
1895 * @sdev: scsi device struct 2001 * @sdev: scsi device struct
1896 * 2002 *
1897 * Returns SUCCESS if command aborted else FAILED 2003 * Returns SUCCESS if command aborted else FAILED
1898 */ 2004 */
1899static int 2005static int
1900scsih_dev_reset(struct scsi_cmnd *scmd) 2006_scsih_target_reset(struct scsi_cmnd *scmd)
1901{ 2007{
1902 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2008 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1903 struct MPT2SAS_DEVICE *sas_device_priv_data; 2009 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1912 2018
1913 sas_device_priv_data = scmd->device->hostdata; 2019 sas_device_priv_data = scmd->device->hostdata;
1914 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2020 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1915 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2021 printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
1916 ioc->name, scmd); 2022 ioc->name, scmd);
1917 scmd->result = DID_NO_CONNECT << 16; 2023 scmd->result = DID_NO_CONNECT << 16;
1918 scmd->scsi_done(scmd); 2024 scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1962} 2068}
1963 2069
1964/** 2070/**
1965 * scsih_abort - eh threads main host reset routine 2071 * _scsih_abort - eh threads main host reset routine
1966 * @sdev: scsi device struct 2072 * @sdev: scsi device struct
1967 * 2073 *
1968 * Returns SUCCESS if command aborted else FAILED 2074 * Returns SUCCESS if command aborted else FAILED
1969 */ 2075 */
1970static int 2076static int
1971scsih_host_reset(struct scsi_cmnd *scmd) 2077_scsih_host_reset(struct scsi_cmnd *scmd)
1972{ 2078{
1973 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2079 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1974 int r, retval; 2080 int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2390} 2496}
2391 2497
2392/** 2498/**
2393 * scsih_qcmd - main scsi request entry point 2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object
2501 * @mpi_request: pointer to the SCSI_IO reqest message frame
2502 *
2503 * Supporting protection 1 and 3.
2504 *
2505 * Returns nothing
2506 */
2507static void
2508_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2509{
2510 u16 eedp_flags;
2511 unsigned char prot_op = scsi_get_prot_op(scmd);
2512 unsigned char prot_type = scsi_get_prot_type(scmd);
2513
2514 if (prot_type == SCSI_PROT_DIF_TYPE0 ||
2515 prot_type == SCSI_PROT_DIF_TYPE2 ||
2516 prot_op == SCSI_PROT_NORMAL)
2517 return;
2518
2519 if (prot_op == SCSI_PROT_READ_STRIP)
2520 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
2521 else if (prot_op == SCSI_PROT_WRITE_INSERT)
2522 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2523 else
2524 return;
2525
2526 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2527
2528 switch (prot_type) {
2529 case SCSI_PROT_DIF_TYPE1:
2530
2531 /*
2532 * enable ref/guard checking
2533 * auto increment ref tag
2534 */
2535 mpi_request->EEDPFlags = eedp_flags |
2536 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2537 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2538 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2539 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2540 cpu_to_be32(scsi_get_lba(scmd));
2541
2542 break;
2543
2544 case SCSI_PROT_DIF_TYPE3:
2545
2546 /*
2547 * enable guard checking
2548 */
2549 mpi_request->EEDPFlags = eedp_flags |
2550 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2551
2552 break;
2553 }
2554}
2555
2556/**
2557 * _scsih_eedp_error_handling - return sense code for EEDP errors
2558 * @scmd: pointer to scsi command object
2559 * @ioc_status: ioc status
2560 *
2561 * Returns nothing
2562 */
2563static void
2564_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
2565{
2566 u8 ascq;
2567 u8 sk;
2568 u8 host_byte;
2569
2570 switch (ioc_status) {
2571 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2572 ascq = 0x01;
2573 break;
2574 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2575 ascq = 0x02;
2576 break;
2577 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2578 ascq = 0x03;
2579 break;
2580 default:
2581 ascq = 0x00;
2582 break;
2583 }
2584
2585 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2586 sk = ILLEGAL_REQUEST;
2587 host_byte = DID_ABORT;
2588 } else {
2589 sk = ABORTED_COMMAND;
2590 host_byte = DID_OK;
2591 }
2592
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
2594 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
2595 SAM_STAT_CHECK_CONDITION;
2596}
2597
2598/**
2599 * _scsih_qcmd - main scsi request entry point
2394 * @scmd: pointer to scsi command object 2600 * @scmd: pointer to scsi command object
2395 * @done: function pointer to be invoked on completion 2601 * @done: function pointer to be invoked on completion
2396 * 2602 *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2401 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 2607 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2402 */ 2608 */
2403static int 2609static int
2404scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 2610_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2405{ 2611{
2406 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2407 struct MPT2SAS_DEVICE *sas_device_priv_data; 2613 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2470 } 2676 }
2471 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2677 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2472 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 2678 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2679 _scsih_setup_eedp(scmd, mpi_request);
2473 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2680 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2474 if (sas_device_priv_data->sas_target->flags & 2681 if (sas_device_priv_data->sas_target->flags &
2475 MPT_TARGET_FLAGS_RAID_COMPONENT) 2682 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2604 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2811 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2605 desc_ioc_state = "scsi ext terminated"; 2812 desc_ioc_state = "scsi ext terminated";
2606 break; 2813 break;
2814 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2815 desc_ioc_state = "eedp guard error";
2816 break;
2817 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2818 desc_ioc_state = "eedp ref tag error";
2819 break;
2820 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2821 desc_ioc_state = "eedp app tag error";
2822 break;
2607 default: 2823 default:
2608 desc_ioc_state = "unknown"; 2824 desc_ioc_state = "unknown";
2609 break; 2825 break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2783} 2999}
2784 3000
2785/** 3001/**
2786 * scsih_io_done - scsi request callback 3002 * _scsih_io_done - scsi request callback
2787 * @ioc: per adapter object 3003 * @ioc: per adapter object
2788 * @smid: system request message index 3004 * @smid: system request message index
2789 * @VF_ID: virtual function id 3005 * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2794 * Return nothing. 3010 * Return nothing.
2795 */ 3011 */
2796static void 3012static void
2797scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3013_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2798{ 3014{
2799 Mpi2SCSIIORequest_t *mpi_request; 3015 Mpi2SCSIIORequest_t *mpi_request;
2800 Mpi2SCSIIOReply_t *mpi_reply; 3016 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2939 scmd->result = DID_RESET << 16; 3155 scmd->result = DID_RESET << 16;
2940 break; 3156 break;
2941 3157
3158 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
3159 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
3160 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
3161 _scsih_eedp_error_handling(scmd, ioc_status);
3162 break;
2942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3163 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2943 case MPI2_IOCSTATUS_INVALID_FUNCTION: 3164 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2944 case MPI2_IOCSTATUS_INVALID_SGL: 3165 case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
5130 .module = THIS_MODULE, 5351 .module = THIS_MODULE,
5131 .name = "Fusion MPT SAS Host", 5352 .name = "Fusion MPT SAS Host",
5132 .proc_name = MPT2SAS_DRIVER_NAME, 5353 .proc_name = MPT2SAS_DRIVER_NAME,
5133 .queuecommand = scsih_qcmd, 5354 .queuecommand = _scsih_qcmd,
5134 .target_alloc = scsih_target_alloc, 5355 .target_alloc = _scsih_target_alloc,
5135 .slave_alloc = scsih_slave_alloc, 5356 .slave_alloc = _scsih_slave_alloc,
5136 .slave_configure = scsih_slave_configure, 5357 .slave_configure = _scsih_slave_configure,
5137 .target_destroy = scsih_target_destroy, 5358 .target_destroy = _scsih_target_destroy,
5138 .slave_destroy = scsih_slave_destroy, 5359 .slave_destroy = _scsih_slave_destroy,
5139 .change_queue_depth = scsih_change_queue_depth, 5360 .change_queue_depth = _scsih_change_queue_depth,
5140 .change_queue_type = scsih_change_queue_type, 5361 .change_queue_type = _scsih_change_queue_type,
5141 .eh_abort_handler = scsih_abort, 5362 .eh_abort_handler = _scsih_abort,
5142 .eh_device_reset_handler = scsih_dev_reset, 5363 .eh_device_reset_handler = _scsih_dev_reset,
5143 .eh_host_reset_handler = scsih_host_reset, 5364 .eh_target_reset_handler = _scsih_target_reset,
5144 .bios_param = scsih_bios_param, 5365 .eh_host_reset_handler = _scsih_host_reset,
5366 .bios_param = _scsih_bios_param,
5145 .can_queue = 1, 5367 .can_queue = 1,
5146 .this_id = -1, 5368 .this_id = -1,
5147 .sg_tablesize = MPT2SAS_SG_DEPTH, 5369 .sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5228} 5450}
5229 5451
5230/** 5452/**
5231 * scsih_remove - detach and remove add host 5453 * _scsih_remove - detach and remove add host
5232 * @pdev: PCI device struct 5454 * @pdev: PCI device struct
5233 * 5455 *
5234 * Return nothing. 5456 * Return nothing.
5235 */ 5457 */
5236static void __devexit 5458static void __devexit
5237scsih_remove(struct pci_dev *pdev) 5459_scsih_remove(struct pci_dev *pdev)
5238{ 5460{
5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5461 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5240 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5462 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5442} 5664}
5443 5665
5444/** 5666/**
5445 * scsih_probe - attach and add scsi host 5667 * _scsih_probe - attach and add scsi host
5446 * @pdev: PCI device struct 5668 * @pdev: PCI device struct
5447 * @id: pci device id 5669 * @id: pci device id
5448 * 5670 *
5449 * Returns 0 success, anything else error. 5671 * Returns 0 success, anything else error.
5450 */ 5672 */
5451static int 5673static int
5452scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5674_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5453{ 5675{
5454 struct MPT2SAS_ADAPTER *ioc; 5676 struct MPT2SAS_ADAPTER *ioc;
5455 struct Scsi_Host *shost; 5677 struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5503 goto out_add_shost_fail; 5725 goto out_add_shost_fail;
5504 } 5726 }
5505 5727
5728 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5729 | SHOST_DIF_TYPE3_PROTECTION);
5730
5506 /* event thread */ 5731 /* event thread */
5507 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5732 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5508 "fw_event%d", ioc->id); 5733 "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5536 5761
5537#ifdef CONFIG_PM 5762#ifdef CONFIG_PM
5538/** 5763/**
5539 * scsih_suspend - power management suspend main entry point 5764 * _scsih_suspend - power management suspend main entry point
5540 * @pdev: PCI device struct 5765 * @pdev: PCI device struct
5541 * @state: PM state change to (usually PCI_D3) 5766 * @state: PM state change to (usually PCI_D3)
5542 * 5767 *
5543 * Returns 0 success, anything else error. 5768 * Returns 0 success, anything else error.
5544 */ 5769 */
5545static int 5770static int
5546scsih_suspend(struct pci_dev *pdev, pm_message_t state) 5771_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5547{ 5772{
5548 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5549 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5564} 5789}
5565 5790
5566/** 5791/**
5567 * scsih_resume - power management resume main entry point 5792 * _scsih_resume - power management resume main entry point
5568 * @pdev: PCI device struct 5793 * @pdev: PCI device struct
5569 * 5794 *
5570 * Returns 0 success, anything else error. 5795 * Returns 0 success, anything else error.
5571 */ 5796 */
5572static int 5797static int
5573scsih_resume(struct pci_dev *pdev) 5798_scsih_resume(struct pci_dev *pdev)
5574{ 5799{
5575 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5800 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5576 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5801 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
5599static struct pci_driver scsih_driver = { 5824static struct pci_driver scsih_driver = {
5600 .name = MPT2SAS_DRIVER_NAME, 5825 .name = MPT2SAS_DRIVER_NAME,
5601 .id_table = scsih_pci_table, 5826 .id_table = scsih_pci_table,
5602 .probe = scsih_probe, 5827 .probe = _scsih_probe,
5603 .remove = __devexit_p(scsih_remove), 5828 .remove = __devexit_p(_scsih_remove),
5604#ifdef CONFIG_PM 5829#ifdef CONFIG_PM
5605 .suspend = scsih_suspend, 5830 .suspend = _scsih_suspend,
5606 .resume = scsih_resume, 5831 .resume = _scsih_resume,
5607#endif 5832#endif
5608}; 5833};
5609 5834
5610 5835
5611/** 5836/**
5612 * scsih_init - main entry point for this driver. 5837 * _scsih_init - main entry point for this driver.
5613 * 5838 *
5614 * Returns 0 success, anything else error. 5839 * Returns 0 success, anything else error.
5615 */ 5840 */
5616static int __init 5841static int __init
5617scsih_init(void) 5842_scsih_init(void)
5618{ 5843{
5619 int error; 5844 int error;
5620 5845
@@ -5630,10 +5855,10 @@ scsih_init(void)
5630 mpt2sas_base_initialize_callback_handler(); 5855 mpt2sas_base_initialize_callback_handler();
5631 5856
5632 /* queuecommand callback hander */ 5857 /* queuecommand callback hander */
5633 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); 5858 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
5634 5859
5635 /* task managment callback handler */ 5860 /* task managment callback handler */
5636 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); 5861 tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
5637 5862
5638 /* base internal commands callback handler */ 5863 /* base internal commands callback handler */
5639 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); 5864 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
5659} 5884}
5660 5885
5661/** 5886/**
5662 * scsih_exit - exit point for this driver (when it is a module). 5887 * _scsih_exit - exit point for this driver (when it is a module).
5663 * 5888 *
5664 * Returns 0 success, anything else error. 5889 * Returns 0 success, anything else error.
5665 */ 5890 */
5666static void __exit 5891static void __exit
5667scsih_exit(void) 5892_scsih_exit(void)
5668{ 5893{
5669 printk(KERN_INFO "mpt2sas version %s unloading\n", 5894 printk(KERN_INFO "mpt2sas version %s unloading\n",
5670 MPT2SAS_DRIVER_VERSION); 5895 MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
5682 mpt2sas_ctl_exit(); 5907 mpt2sas_ctl_exit();
5683} 5908}
5684 5909
5685module_init(scsih_init); 5910module_init(_scsih_init);
5686module_exit(scsih_exit); 5911module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 5c65da519e39..686695b155c7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
264}; 264};
265 265
266/** 266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture 267 * _transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object 268 * @ioc: per adapter object
269 * @sas_address: expander sas address 269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object 270 * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
274 * Returns 0 for success, non-zero for failure. 274 * Returns 0 for success, non-zero for failure.
275 */ 275 */
276static int 276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, 277_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev) 278 u64 sas_address, struct sas_expander_device *edev)
279{ 279{
280 Mpi2SmpPassthroughRequest_t *mpi_request; 280 Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type == 579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) 580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc, 581 _transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address, 582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy)); 583 rphy_to_expander_device(rphy));
584 584
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
852} 852}
853 853
854/** 854/**
855 * transport_get_linkerrors - 855 * _transport_get_linkerrors -
856 * @phy: The sas phy object 856 * @phy: The sas phy object
857 * 857 *
858 * Only support sas_host direct attached phys. 858 * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
860 * 860 *
861 */ 861 */
862static int 862static int
863transport_get_linkerrors(struct sas_phy *phy) 863_transport_get_linkerrors(struct sas_phy *phy)
864{ 864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy; 866 struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
903} 903}
904 904
905/** 905/**
906 * transport_get_enclosure_identifier - 906 * _transport_get_enclosure_identifier -
907 * @phy: The sas phy object 907 * @phy: The sas phy object
908 * 908 *
909 * Obtain the enclosure logical id for an expander. 909 * Obtain the enclosure logical id for an expander.
910 * Returns 0 for success, non-zero for failure. 910 * Returns 0 for success, non-zero for failure.
911 */ 911 */
912static int 912static int
913transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 913_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
914{ 914{
915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
916 struct _sas_node *sas_expander; 916 struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
929} 929}
930 930
931/** 931/**
932 * transport_get_bay_identifier - 932 * _transport_get_bay_identifier -
933 * @phy: The sas phy object 933 * @phy: The sas phy object
934 * 934 *
935 * Returns the slot id for a device that resides inside an enclosure. 935 * Returns the slot id for a device that resides inside an enclosure.
936 */ 936 */
937static int 937static int
938transport_get_bay_identifier(struct sas_rphy *rphy) 938_transport_get_bay_identifier(struct sas_rphy *rphy)
939{ 939{
940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
941 struct _sas_device *sas_device; 941 struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
953} 953}
954 954
955/** 955/**
956 * transport_phy_reset - 956 * _transport_phy_reset -
957 * @phy: The sas phy object 957 * @phy: The sas phy object
958 * @hard_reset: 958 * @hard_reset:
959 * 959 *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
961 * Returns 0 for success, non-zero for failure. 961 * Returns 0 for success, non-zero for failure.
962 */ 962 */
963static int 963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset) 964_transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{ 965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy; 967 struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1002} 1002}
1003 1003
1004/** 1004/**
1005 * transport_smp_handler - transport portal for smp passthru 1005 * _transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object 1006 * @shost: shost object
1007 * @rphy: sas transport rphy object 1007 * @rphy: sas transport rphy object
1008 * @req: 1008 * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1012 * smp_rep_general /sys/class/bsg/expander-5:0 1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */ 1013 */
1014static int 1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, 1015_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req) 1016 struct request *req)
1017{ 1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1200} 1200}
1201 1201
1202struct sas_function_template mpt2sas_transport_functions = { 1202struct sas_function_template mpt2sas_transport_functions = {
1203 .get_linkerrors = transport_get_linkerrors, 1203 .get_linkerrors = _transport_get_linkerrors,
1204 .get_enclosure_identifier = transport_get_enclosure_identifier, 1204 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1205 .get_bay_identifier = transport_get_bay_identifier, 1205 .get_bay_identifier = _transport_get_bay_identifier,
1206 .phy_reset = transport_phy_reset, 1206 .phy_reset = _transport_phy_reset,
1207 .smp_handler = transport_smp_handler, 1207 .smp_handler = _transport_smp_handler,
1208}; 1208};
1209 1209
1210struct scsi_transport_template *mpt2sas_transport_template; 1210struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1b..000000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/sas_ata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "mvsas"
45#define DRV_VERSION "0.5.2"
46#define _MV_DUMP 0
47#define MVS_DISABLE_NVRAM
48#define MVS_DISABLE_MSI
49
50#define mr32(reg) readl(regs + MVS_##reg)
51#define mw32(reg,val) writel((val), regs + MVS_##reg)
52#define mw32_f(reg,val) do { \
53 writel((val), regs + MVS_##reg); \
54 readl(regs + MVS_##reg); \
55 } while (0)
56
57#define MVS_ID_NOT_MAPPED 0x7f
58#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
59
60/* offset for D2H FIS in the Received FIS List Structure */
61#define SATA_RECEIVED_D2H_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
63#define SATA_RECEIVED_PIO_FIS(reg_set) \
64 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
65#define UNASSOC_D2H_FIS(id) \
66 ((void *) mvi->rx_fis + 0x100 * id)
67
68#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
69 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
70 (__mc) != 0 && __rest; \
71 (++__lseq), (__mc) >>= 1)
72
73/* driver compile-time configuration */
74enum driver_configuration {
75 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
76 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
77 /* software requires power-of-2
78 ring size */
79
80 MVS_SLOTS = 512, /* command slots */
81 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
82 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
83 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
84 MVS_OAF_SZ = 64, /* Open address frame buffer size */
85
86 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
87
88 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
89 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
90};
91
92/* unchangeable hardware details */
93enum hardware_details {
94 MVS_MAX_PHYS = 8, /* max. possible phys */
95 MVS_MAX_PORTS = 8, /* max. possible ports */
96 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
97};
98
99/* peripheral registers (BAR2) */
100enum peripheral_registers {
101 SPI_CTL = 0x10, /* EEPROM control */
102 SPI_CMD = 0x14, /* EEPROM command */
103 SPI_DATA = 0x18, /* EEPROM data */
104};
105
106enum peripheral_register_bits {
107 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
108 TWSI_RD = (1U << 4), /* EEPROM read access */
109
110 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
111};
112
113/* enhanced mode registers (BAR4) */
114enum hw_registers {
115 MVS_GBL_CTL = 0x04, /* global control */
116 MVS_GBL_INT_STAT = 0x08, /* global irq status */
117 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
118 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
119
120 MVS_CTL = 0x100, /* SAS/SATA port configuration */
121 MVS_PCS = 0x104, /* SAS/SATA port control/status */
122 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
123 MVS_CMD_LIST_HI = 0x10C,
124 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
125 MVS_RX_FIS_HI = 0x114,
126
127 MVS_TX_CFG = 0x120, /* TX configuration */
128 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
129 MVS_TX_HI = 0x128,
130
131 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
132 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
133 MVS_RX_CFG = 0x134, /* RX configuration */
134 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
135 MVS_RX_HI = 0x13C,
136 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
137
138 MVS_INT_COAL = 0x148, /* Int coalescing config */
139 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
140 MVS_INT_STAT = 0x150, /* Central int status */
141 MVS_INT_MASK = 0x154, /* Central int enable */
142 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
143 MVS_INT_MASK_SRS = 0x15C,
144
145 /* ports 1-3 follow after this */
146 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
147 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
148 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
149 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
150
151 /* ports 1-3 follow after this */
152 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
153 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
154
155 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
156 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
157
158 /* ports 1-3 follow after this */
159 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
160 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
161 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
162 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
163
164 /* ports 1-3 follow after this */
165 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
166 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
167 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
168 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
169};
170
171enum hw_register_bits {
172 /* MVS_GBL_CTL */
173 INT_EN = (1U << 1), /* Global int enable */
174 HBA_RST = (1U << 0), /* HBA reset */
175
176 /* MVS_GBL_INT_STAT */
177 INT_XOR = (1U << 4), /* XOR engine event */
178 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
179
180 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
181 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
182 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
183 MODE_AUTO_DET_PORT6 = (1U << 14),
184 MODE_AUTO_DET_PORT5 = (1U << 13),
185 MODE_AUTO_DET_PORT4 = (1U << 12),
186 MODE_AUTO_DET_PORT3 = (1U << 11),
187 MODE_AUTO_DET_PORT2 = (1U << 10),
188 MODE_AUTO_DET_PORT1 = (1U << 9),
189 MODE_AUTO_DET_PORT0 = (1U << 8),
190 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
191 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
192 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
193 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
194 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
195 MODE_SAS_PORT6_MASK = (1U << 6),
196 MODE_SAS_PORT5_MASK = (1U << 5),
197 MODE_SAS_PORT4_MASK = (1U << 4),
198 MODE_SAS_PORT3_MASK = (1U << 3),
199 MODE_SAS_PORT2_MASK = (1U << 2),
200 MODE_SAS_PORT1_MASK = (1U << 1),
201 MODE_SAS_PORT0_MASK = (1U << 0),
202 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
203 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
204 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
205 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
206
207 /* SAS_MODE value may be
208 * dictated (in hw) by values
209 * of SATA_TARGET & AUTO_DET
210 */
211
212 /* MVS_TX_CFG */
213 TX_EN = (1U << 16), /* Enable TX */
214 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
215
216 /* MVS_RX_CFG */
217 RX_EN = (1U << 16), /* Enable RX */
218 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
219
220 /* MVS_INT_COAL */
221 COAL_EN = (1U << 16), /* Enable int coalescing */
222
223 /* MVS_INT_STAT, MVS_INT_MASK */
224 CINT_I2C = (1U << 31), /* I2C event */
225 CINT_SW0 = (1U << 30), /* software event 0 */
226 CINT_SW1 = (1U << 29), /* software event 1 */
227 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
228 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
229 CINT_MEM = (1U << 26), /* int mem parity err */
230 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
231 CINT_SRS = (1U << 3), /* SRS event */
232 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
233 CINT_DONE = (1U << 0), /* cmd completion */
234
235 /* shl for ports 1-3 */
236 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
237 CINT_PORT = (1U << 8), /* port0 event */
238 CINT_PORT_MASK_OFFSET = 8,
239 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
240
241 /* TX (delivery) ring bits */
242 TXQ_CMD_SHIFT = 29,
243 TXQ_CMD_SSP = 1, /* SSP protocol */
244 TXQ_CMD_SMP = 2, /* SMP protocol */
245 TXQ_CMD_STP = 3, /* STP/SATA protocol */
246 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
247 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
248 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
249 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
250 TXQ_SRS_SHIFT = 20, /* SATA register set */
251 TXQ_SRS_MASK = 0x7f,
252 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
253 TXQ_PHY_MASK = 0xff,
254 TXQ_SLOT_MASK = 0xfff, /* slot number */
255
256 /* RX (completion) ring bits */
257 RXQ_GOOD = (1U << 23), /* Response good */
258 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
259 RXQ_CMD_RX = (1U << 20), /* target cmd received */
260 RXQ_ATTN = (1U << 19), /* attention */
261 RXQ_RSP = (1U << 18), /* response frame xfer'd */
262 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
263 RXQ_DONE = (1U << 16), /* cmd complete */
264 RXQ_SLOT_MASK = 0xfff, /* slot number */
265
266 /* mvs_cmd_hdr bits */
267 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
268 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
269
270 /* SSP initiator only */
271 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
272
273 /* SSP initiator or target */
274 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
275
276 /* SSP target only */
277 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
278 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
279 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
280 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
281
282 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
283 MCH_FBURST = (1U << 11), /* first burst (SSP) */
284 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
285 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
286 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
287 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
288 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
289 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
290 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
291 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
292
293 CCTL_RST = (1U << 5), /* port logic reset */
294
295 /* 0(LSB first), 1(MSB first) */
296 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
297 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
298 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
299 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
300
301 /* MVS_Px_SER_CTLSTAT (per-phy control) */
302 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
303 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
304 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
305 PHY_RST = (1U << 0), /* phy reset */
306 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
307 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
308 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
309 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
310 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
311 PHY_READY_MASK = (1U << 20),
312
313 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
314 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
315 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
316 PHYEV_AN = (1U << 18), /* SATA async notification */
317 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
318 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
319 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
320 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
321 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
322 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
323 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
324 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
325 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
326 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
327 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
328 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
329 PHYEV_ID_DONE = (1U << 2), /* identify done */
330 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
331 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
332
333 /* MVS_PCS */
334 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
335 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
336 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
337 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
338 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
339 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
340 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
341 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
342 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
343 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
344
345 /* Port n Attached Device Info */
346 PORT_DEV_SSP_TRGT = (1U << 19),
347 PORT_DEV_SMP_TRGT = (1U << 18),
348 PORT_DEV_STP_TRGT = (1U << 17),
349 PORT_DEV_SSP_INIT = (1U << 11),
350 PORT_DEV_SMP_INIT = (1U << 10),
351 PORT_DEV_STP_INIT = (1U << 9),
352 PORT_PHY_ID_MASK = (0xFFU << 24),
353 PORT_DEV_TRGT_MASK = (0x7U << 17),
354 PORT_DEV_INIT_MASK = (0x7U << 9),
355 PORT_DEV_TYPE_MASK = (0x7U << 0),
356
357 /* Port n PHY Status */
358 PHY_RDY = (1U << 2),
359 PHY_DW_SYNC = (1U << 1),
360 PHY_OOB_DTCTD = (1U << 0),
361
362 /* VSR */
363 /* PHYMODE 6 (CDB) */
364 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
365 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
366 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
367 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
368 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
369 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
370 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
371 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
372 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
373 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
374 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
375 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
376 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
377 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
378};
379
380enum mvs_info_flags {
381 MVF_MSI = (1U << 0), /* MSI is enabled */
382 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
383};
384
385enum sas_cmd_port_registers {
386 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
387 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
388 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
389 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
390 CMD_OOB_SPACE = 0x110, /* OOB space control register */
391 CMD_OOB_BURST = 0x114, /* OOB burst control register */
392 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
393 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
394 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
395 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
396 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
397 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
398 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
399 CMD_ID_TEST = 0x134, /* ID test register */
400 CMD_PL_TIMER = 0x138, /* PL timer register */
401 CMD_WD_TIMER = 0x13c, /* WD timer register */
402 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
403 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
404 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
405 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
406 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
407 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
408 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
409 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
410 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
411 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
412 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
413 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
414 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
415 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
416 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
417 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
418 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
419 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
420 CMD_RESET_COUNT = 0x188, /* Reset Count */
421 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
422 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
423 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
424 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
425 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
426 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
427 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
428 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
429 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
430 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
431 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
432 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
433 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
434 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
435 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
436};
437
/* SAS/SATA configuration port registers, aka phy registers.
 * Per-phy register offsets; read/written via the port-register helpers
 * (mvs_read_port_* / mvs_write_port_*) declared later in this file.
 */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
	PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
	PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
	PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
	PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
	PHYR_SATA_CTL = 0x18, /* SATA control */
	PHYR_PHY_STAT = 0x1C, /* PHY status */
	PHYR_SATA_SIG0 = 0x20, /* port SATA signature FIS (Byte 0-3) */
	PHYR_SATA_SIG1 = 0x24, /* port SATA signature FIS (Byte 4-7) */
	PHYR_SATA_SIG2 = 0x28, /* port SATA signature FIS (Byte 8-11) */
	PHYR_SATA_SIG3 = 0x2c, /* port SATA signature FIS (Byte 12-15) */
	PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
	PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
	PHYR_WIDE_PORT = 0x38, /* wide port participating */
	PHYR_CURRENT0 = 0x80, /* current connection info 0 */
	PHYR_CURRENT1 = 0x84, /* current connection info 1 */
	PHYR_CURRENT2 = 0x88, /* current connection info 2 */
};
459
/* SAS/SATA Vendor Specific Port Registers (analog/serdes tuning space). */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT = 0x00, /* Phy Status */
	VSR_PHY_MODE1 = 0x01, /* phy tx */
	VSR_PHY_MODE2 = 0x02, /* tx scc */
	VSR_PHY_MODE3 = 0x03, /* pll */
	VSR_PHY_MODE4 = 0x04, /* VCO */
	VSR_PHY_MODE5 = 0x05, /* Rx */
	VSR_PHY_MODE6 = 0x06, /* CDR */
	VSR_PHY_MODE7 = 0x07, /* Impedance */
	VSR_PHY_MODE8 = 0x08, /* Voltage */
	VSR_PHY_MODE9 = 0x09, /* Test */
	VSR_PHY_MODE10 = 0x0A, /* Power */
	VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
	VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
	VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
};
477
/* Offsets into the device's PCI configuration space used by this driver. */
enum pci_cfg_registers {
	PCR_PHY_CTL = 0x40,	/* phy control (see PCTL_* bits below) */
	PCR_PHY_CTL2 = 0x90,	/* second phy control register */
	PCR_DEV_CTRL = 0xE8,	/* device control (PRD request size lives here) */
};
483
/* Bit fields for the PCI config registers above.
 * PCTL_* presumably apply to PCR_PHY_CTL/PCR_PHY_CTL2 and PRD_* to
 * PCR_DEV_CTRL — confirm against the 64xx register spec.
 */
enum pci_cfg_register_bits {
	PCTL_PWR_ON = (0xFU << 24),
	PCTL_OFF = (0xFU << 12),
	PRD_REQ_SIZE = (0x4000),
	PRD_REQ_MASK = (0x00007000),
};
490
/* Layout of an entry in the on-board NVRAM (read via mvs_nvram_read). */
enum nvram_layout_offsets {
	NVR_SIG = 0x00, /* 0xAA, 0x55 */
	NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
};
495
/* Supported ASIC variants; used as the index into mvs_chips[]. */
enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6480,
};
501
/* Bit flags stored in mvs_phy.phy_type describing the attached protocol. */
enum port_type {
	PORT_TYPE_SAS = (1L << 1),
	PORT_TYPE_SATA = (1L << 0),
};
506
/* Command Table Format: offsets/limits within the per-slot command table
 * for each transport protocol (SSP, STP/SATA, SMP).
 */
enum ct_format {
	/* SSP */
	SSP_F_H = 0x00,
	SSP_F_IU = 0x18,
	SSP_F_MAX = 0x4D,
	/* STP */
	STP_CMD_FIS = 0x00,
	STP_ATAPI_CMD = 0x40,
	STP_F_MAX = 0x10,
	/* SMP */
	SMP_F_T = 0x00,
	SMP_F_DEP = 0x01,
	SMP_F_MAX = 0x101,
};
522
/* Layout of the per-slot status buffer (pointed to by mvs_slot_info.response). */
enum status_buffer {
	SB_EIR_OFF = 0x00, /* Error Information Record */
	SB_RFB_OFF = 0x08, /* Response Frame Buffer */
	SB_RFB_MAX = 0x400, /* RFB size*/
};
528
/* Bits of the first dword of the error information record
 * (slot->response + SB_EIR_OFF); tested in mvs_slot_err() as err_dw0.
 */
enum error_info_rec {
	CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
	CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
	RSP_OVER = (1U << 29), /* rsp buffer overflow */
	RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
	UNK_FIS = (1U << 27), /* unknown FIS */
	DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
	SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
	TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
	R_ERR = (1U << 23), /* SATA returned R_ERR prim */
	RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
	XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
	UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
	DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
	INTERLOCK = (1U << 15), /* interlock error */
	NAK = (1U << 14), /* NAK rx'd */
	ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
	CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
	OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
	PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
	NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
	STP_RES_BSY = (1U << 8), /* STP resources busy */
	BREAK = (1U << 7), /* break received */
	BAD_DEST = (1U << 6), /* bad destination */
	BAD_PROTO = (1U << 5), /* protocol not supported */
	BAD_RATE = (1U << 4), /* cxn rate not supported */
	WRONG_DEST = (1U << 3), /* wrong destination error */
	CREDIT_TO = (1U << 2), /* credit timeout */
	WDOG_TO = (1U << 1), /* watchdog timeout */
	BUF_PAR = (1U << 0), /* buffer parity error */
};
560
/* Bits of the second dword of the error information record
 * (tested in mvs_slot_err() as err_dw1; see CMD_PI_ERR "flags2" note).
 */
enum error_info_rec_2 {
	SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
	GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
	APP_CHK_ERR = (1U << 13), /* Application Check error */
	REF_CHK_ERR = (1U << 12), /* Reference Check Error */
	USR_BLK_NM = (1U << 0), /* User Block Number */
};
568
/* Static per-chip capabilities; one entry per chip_flavors in mvs_chips[]. */
struct mvs_chip_info {
	u32 n_phy;	/* number of phys (see mvs_scan_start loop bound) */
	u32 srs_sz;	/* SATA register-set count — confirm exact meaning */
	u32 slot_width;	/* log2-style slot sizing parameter — confirm */
};
574
/* Error information record at the start of the status buffer;
 * flags/flags2 correspond to enum error_info_rec / error_info_rec_2.
 */
struct mvs_err_info {
	__le32 flags;
	__le32 flags2;
};
579
/* Physical region descriptor: one DMA scatter/gather entry consumed
 * by the hardware.
 */
struct mvs_prd {
	__le64 addr;		/* 64-bit buffer address */
	__le32 reserved;
	__le32 len;		/* 16-bit length */
};
585
/* Hardware command header: one per slot in the mvi->slot array,
 * holding DMA pointers to the command table, open address frame,
 * status buffer and PRD table for that slot.
 */
struct mvs_cmd_hdr {
	__le32 flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32 lens;		/* cmd, max resp frame len */
	__le32 tags;		/* targ port xfer tag; tag */
	__le32 data_len;	/* data xfer len */
	__le64 cmd_tbl;		/* command table address */
	__le64 open_frame;	/* open addr frame address */
	__le64 status_buf;	/* status buffer address */
	__le64 prd_tbl;		/* PRD tbl address */
	__le32 reserved[4];
};
597
/* Driver-private port state layered on top of the libsas asd_sas_port. */
struct mvs_port {
	struct asd_sas_port sas_port;
	u8 port_attached;	/* nonzero once a device is attached */
	u8 taskfileset;		/* SATA register set index (see mvs_sata_done) */
	u8 wide_port_phymap;	/* phy mask when part of a wide port */
	struct list_head list;	/* active slots on this port (mvs_slot_info.list) */
};
605
/* Driver-private per-phy state layered on top of the libsas asd_sas_phy. */
struct mvs_phy {
	struct mvs_port		*port;		/* port this phy belongs to, or NULL */
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64		dev_sas_addr;
	u64		att_dev_sas_addr;	/* attached device's SAS address */
	u32		att_dev_info;
	u32		dev_info;
	u32		phy_type;		/* PORT_TYPE_SAS / PORT_TYPE_SATA */
	u32		phy_status;		/* last mvs_is_phy_ready() result */
	u32		irq_status;		/* last port IRQ status snapshot */
	u32		frame_rcvd_size;
	u8		frame_rcvd[32];		/* identify frame / signature FIS */
	u8		phy_attached;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
};
624
/* Per-slot (per outstanding command) bookkeeping; the slot's tag is its
 * index within mvi->slot_info[].
 */
struct mvs_slot_info {
	struct list_head list;		/* linkage on mvs_port.list while active */
	struct sas_task *task;		/* owning libsas task, NULL when free */
	u32 n_elem;			/* mapped scatter/gather entry count */
	u32 tx;				/* TX ring index used for this command */

	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void *buf;
	dma_addr_t buf_dma;
#if _MV_DUMP
	u32 cmd_size;			/* command table size, for debug dumps */
#endif

	void *response;			/* points at the status buffer in *buf */
	struct mvs_port *port;		/* port the command was issued on */
};
643
/* Top-level per-HBA state: PCI/ioremap handles, libsas glue, the TX
 * (delivery) and RX (completion) DMA rings, the slot table and the
 * per-phy/per-port arrays.
 */
struct mvs_info {
	unsigned long flags;

	spinlock_t lock;		/* host-wide lock */
	struct pci_dev *pdev;		/* our device */
	void __iomem *regs;		/* enhanced mode registers */
	void __iomem *peri_regs;	/* peripheral registers */

	u8 sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct sas;	/* SCSI/SAS glue */
	struct Scsi_Host *shost;

	__le32 *tx;			/* TX (delivery) DMA ring */
	dma_addr_t tx_dma;
	u32 tx_prod;			/* cached next-producer idx */

	__le32 *rx;			/* RX (completion) DMA ring */
	dma_addr_t rx_dma;
	u32 rx_cons;			/* RX consumer idx */

	__le32 *rx_fis;			/* RX'd FIS area */
	dma_addr_t rx_fis_dma;

	struct mvs_cmd_hdr *slot;	/* DMA command header slots */
	dma_addr_t slot_dma;

	const struct mvs_chip_info *chip;

	u8 tags[MVS_SLOTS];		/* free-slot bitmap (see mvs_tag_*) */
	struct mvs_slot_info slot_info[MVS_SLOTS];
				/* further per-slot information */
	struct mvs_phy phy[MVS_MAX_PHYS];
	struct mvs_port port[MVS_MAX_PHYS];
#ifdef MVS_USE_TASKLET
	struct tasklet_struct tasklet;	/* deferred interrupt processing */
#endif
};
681
/* Forward declarations: per-port register accessors and phy helpers
 * defined later in this file.
 */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
static void mvs_release_task(struct mvs_info *mvi, int phy_no);

/* SCSI midlayer hooks implemented below */
static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_slave_configure(struct scsi_device *sdev);

/* SAS transport template allocated at module init */
static struct scsi_transport_template *mvs_stt;
701
702static const struct mvs_chip_info mvs_chips[] = {
703 [chip_6320] = { 2, 16, 9 },
704 [chip_6440] = { 4, 16, 9 },
705 [chip_6480] = { 8, 32, 10 },
706};
707
/* SCSI midlayer host template: mostly generic libsas handlers, plus the
 * driver's own scan and slave-configure hooks.  can_queue is raised at
 * probe time elsewhere — this is just the initial value.
 * NOTE(review): "raised at probe time" assumed from the placeholder
 * value of 1; confirm against the probe path.
 */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler	= sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
732
/*
 * Print @size bytes at @data as a hex + ASCII dump, 16 bytes per row,
 * labelling each row with @baseaddr + running offset.  Bytes that are
 * not alphanumeric are shown as '.' in the ASCII column.
 */
static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;		/* bytes printed in the current row (<= 16) */
	u32 offset;

	offset = 0;
	while (size) {
		printk("%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk("%02X ", (u32)data[i]);
			else
				printk("   ");
		}
		printk(": ");
		for (i = 0; i < run; i++)
			printk("%c", isalnum(data[i]) ? data[i] : '.');
		printk("\n");
		data = &data[16];
		offset += run;
	}
	printk("\n");
}
762
#if _MV_DUMP
/*
 * Debug helper: dump the first 32 bytes of a slot's status buffer,
 * which sits after the command table, open address frame and PRD table
 * inside the slot's DMA buffer.
 */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* status buffer offset within the slot buffer */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif
779
/*
 * Debug helper (compiled out unless _MV_DUMP): dump the delivery queue
 * entry, command header, command table, open address frame, status
 * buffer and PRD table associated with slot @tag.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = slot->tx;
	/* << 16 << 16 instead of << 32 avoids UB when the macro yields u32 */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
828
/*
 * Debug helper (compiled out unless _MV_DUMP > 2): dump the next RX
 * completion queue entry and the task it maps to.
 * NOTE(review): entry = rx_cons + 1 is not masked against the ring
 * size here, unlike mvs_int_rx() — confirm it cannot index past the
 * ring when rx_cons is at the last slot.
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
850
851static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
852{
853 void __iomem *regs = mvi->regs;
854 u32 tmp;
855
856 tmp = mr32(GBL_CTL);
857
858 mw32(GBL_CTL, tmp | INT_EN);
859}
860
861static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
862{
863 void __iomem *regs = mvi->regs;
864 u32 tmp;
865
866 tmp = mr32(GBL_CTL);
867
868 mw32(GBL_CTL, tmp & ~INT_EN);
869}
870
871static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
872
873/* move to PCI layer or libata core? */
874static int pci_go_64(struct pci_dev *pdev)
875{
876 int rc;
877
878 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
879 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
880 if (rc) {
881 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
882 if (rc) {
883 dev_printk(KERN_ERR, &pdev->dev,
884 "64-bit DMA enable failed\n");
885 return rc;
886 }
887 }
888 } else {
889 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
890 if (rc) {
891 dev_printk(KERN_ERR, &pdev->dev,
892 "32-bit DMA enable failed\n");
893 return rc;
894 }
895 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
896 if (rc) {
897 dev_printk(KERN_ERR, &pdev->dev,
898 "32-bit consistent DMA enable failed\n");
899 return rc;
900 }
901 }
902
903 return rc;
904}
905
906static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
907{
908 if (task->lldd_task) {
909 struct mvs_slot_info *slot;
910 slot = (struct mvs_slot_info *) task->lldd_task;
911 *tag = slot - mvi->slot_info;
912 return 1;
913 }
914 return 0;
915}
916
917static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
918{
919 void *bitmap = (void *) &mvi->tags;
920 clear_bit(tag, bitmap);
921}
922
/* Free a previously allocated slot tag (thin wrapper over mvs_tag_clear). */
static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}
927
928static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
929{
930 void *bitmap = (void *) &mvi->tags;
931 set_bit(tag, bitmap);
932}
933
934static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
935{
936 unsigned int index, tag;
937 void *bitmap = (void *) &mvi->tags;
938
939 index = find_first_zero_bit(bitmap, MVS_SLOTS);
940 tag = index;
941 if (tag >= MVS_SLOTS)
942 return -SAS_QUEUE_FULL;
943 mvs_tag_set(mvi, tag);
944 *tag_out = tag;
945 return 0;
946}
947
948static void mvs_tag_init(struct mvs_info *mvi)
949{
950 int i;
951 for (i = 0; i < MVS_SLOTS; ++i)
952 mvs_tag_clear(mvi, i);
953}
954
955#ifndef MVS_DISABLE_NVRAM
956static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
957{
958 int timeout = 1000;
959
960 if (addr & ~SPI_ADDR_MASK)
961 return -EINVAL;
962
963 writel(addr, regs + SPI_CMD);
964 writel(TWSI_RD, regs + SPI_CTL);
965
966 while (timeout-- > 0) {
967 if (readl(regs + SPI_CTL) & TWSI_RDY) {
968 *data = readl(regs + SPI_DATA);
969 return 0;
970 }
971
972 udelay(10);
973 }
974
975 return -EBUSY;
976}
977
978static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
979 void *buf, u32 buflen)
980{
981 u32 addr_end, tmp_addr, i, j;
982 u32 tmp = 0;
983 int rc;
984 u8 *tmp8, *buf8 = buf;
985
986 addr_end = addr + buflen;
987 tmp_addr = ALIGN(addr, 4);
988 if (addr > 0xff)
989 return -EINVAL;
990
991 j = addr & 0x3;
992 if (j) {
993 rc = mvs_eep_read(regs, tmp_addr, &tmp);
994 if (rc)
995 return rc;
996
997 tmp8 = (u8 *)&tmp;
998 for (i = j; i < 4; i++)
999 *buf8++ = tmp8[i];
1000
1001 tmp_addr += 4;
1002 }
1003
1004 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1005 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1006 if (rc)
1007 return rc;
1008
1009 memcpy(buf8, &tmp, 4);
1010 buf8 += 4;
1011 }
1012
1013 if (tmp_addr < addr_end) {
1014 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1015 if (rc)
1016 return rc;
1017
1018 tmp8 = (u8 *)&tmp;
1019 j = addr_end - tmp_addr;
1020 for (i = 0; i < j; i++)
1021 *buf8++ = tmp8[i];
1022
1023 tmp_addr += 4;
1024 }
1025
1026 return 0;
1027}
1028#endif
1029
1030static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
1031 void *buf, u32 buflen)
1032{
1033#ifndef MVS_DISABLE_NVRAM
1034 void __iomem *regs = mvi->regs;
1035 int rc, i;
1036 u32 sum;
1037 u8 hdr[2], *tmp;
1038 const char *msg;
1039
1040 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1041 if (rc) {
1042 msg = "nvram hdr read failed";
1043 goto err_out;
1044 }
1045 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1046 if (rc) {
1047 msg = "nvram read failed";
1048 goto err_out;
1049 }
1050
1051 if (hdr[0] != 0x5A) {
1052 /* entry id */
1053 msg = "invalid nvram entry id";
1054 rc = -ENOENT;
1055 goto err_out;
1056 }
1057
1058 tmp = buf;
1059 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1060 for (i = 0; i < buflen; i++)
1061 sum += ((u32)tmp[i]);
1062
1063 if (sum) {
1064 msg = "nvram checksum failure";
1065 rc = -EILSEQ;
1066 goto err_out;
1067 }
1068
1069 return 0;
1070
1071err_out:
1072 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1073 return rc;
1074#else
1075 /* FIXME , For SAS target mode */
1076 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1077 return 0;
1078#endif
1079}
1080
/*
 * Publish a phy's received identify data to libsas: fill in the
 * negotiated/min/max link rates, fix up the identify frame for SAS
 * phys, and raise PORTE_BYTES_DMAED so libsas starts discovery.
 * No-op while the phy has nothing attached.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

	if (!phy->phy_attached)
		return;

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		/* hardware limits: 1.5 Gbps min, 3.0 Gbps max */
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		/* overwrite fields of the raw received identify frame */
		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}
	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
1113
/*
 * scsi_host_template.scan_finished hook: report the scan complete once
 * at least one second has elapsed (time for phy-up events to arrive)
 * and all queued discovery work has been flushed.
 */
static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}
1124
1125static void mvs_scan_start(struct Scsi_Host *shost)
1126{
1127 int i;
1128 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1129
1130 for (i = 0; i < mvi->chip->n_phy; ++i) {
1131 mvs_bytes_dmaed(mvi, i);
1132 }
1133}
1134
/*
 * scsi_host_template.slave_configure hook: run the generic libsas
 * configuration, then clamp SATA devices to queue depth 1 (NCQ left
 * disabled for now, per the commented-out code).
 */
static int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;

	if (dev_is_sata(dev)) {
		/* struct ata_port *ap = dev->sata_dev.ap; */
		/* struct ata_device *adev = ap->link.device; */

		/* clamp at no NCQ for the time being */
		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	}
	return 0;
}
1153
/*
 * Handle a per-port interrupt: read and decode the port's IRQ status,
 * report unplug/replug to libsas, complete SATA OOB/signature-FIS
 * handshakes, and forward broadcast-change events.  The status is
 * written back (acknowledged) at the end.
 */
static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
	/*
	* events is port event now ,
	* we need check the interrupt status which belongs to per port.
	*/
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		phy_no, phy->irq_status);

	/* phy went away (or decode error): abort its tasks, then either
	 * notify loss of signal or retry a link reset */
	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		mvs_release_task(mvi, phy_no);
		if (!mvs_is_phy_ready(mvi, phy_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
			dev_printk(KERN_INFO, &pdev->dev,
				"Port %d Unplug Notice\n", phy_no);

		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			/* SATA COMWAKE seen: start watching for the
			 * signature FIS */
			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
			mvs_write_port_irq_mask(mvi, phy_no,
						tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, phy_no);

				/* SATA link is up: stop watching for
				 * further signature FISes */
				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								phy_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								phy_no, tmp);
				}

				mvs_update_phyinfo(mvi, phy_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, phy_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
						NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH) {
			/* expander topology change: kill outstanding
			 * tasks and let libsas revalidate the domain */
			mvs_release_task(mvi, phy_no);
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
		}
	}
	/* ack the events we just handled */
	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
1218
1219static void mvs_int_sata(struct mvs_info *mvi)
1220{
1221 u32 tmp;
1222 void __iomem *regs = mvi->regs;
1223 tmp = mr32(INT_STAT_SRS);
1224 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1225}
1226
/*
 * Queue a SLOT_RESET command for @slot_idx on the TX (delivery) ring:
 * build the descriptor with the affected phy mask (wide-port map when
 * available) and, for ATA tasks, the SATA register set, then bump the
 * producer index.  Caller must hold mvi->lock.
 */
static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
				u32 slot_idx)
{
	void __iomem *regs = mvi->regs;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	u32 reg_set, phy_mask;

	if (!sas_protocol_ata(task->task_proto)) {
		reg_set = 0;
		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
				sas_port->phy_mask;
	} else {
		reg_set = port->taskfileset;
		phy_mask = sas_port->phy_mask;
	}
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
					(phy_mask << TXQ_PHY_SHIFT) |
					(reg_set << TXQ_SRS_SHIFT));

	/* tell hardware, then advance and wrap the cached producer index */
	mw32(TX_PROD_IDX, mvi->tx_prod);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
}
1252
/*
 * Complete a SATA/STP task: copy the received D2H FIS out of the FIS
 * area for the port's register set into the task's response buffer.
 * Returns SAM_GOOD, or SAS_PROTO_RESPONSE when @err says the caller
 * saw an error and libsas should inspect the FIS.
 */
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err))
		stat = SAS_PROTO_RESPONSE;
	return stat;
}
1270
1271static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1272{
1273 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1274 mvs_tag_clear(mvi, slot_idx);
1275}
1276
/*
 * Undo the DMA mappings for a completed/aborted task and detach it
 * from its slot: unmap the data scatterlist (non-ATA only; ATA
 * mappings are owned by libata), unmap SMP request/response buffers,
 * and clear the slot<->task linkage.  The slot tag itself is freed
 * separately by mvs_slot_free().
 */
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			pci_unmap_sg(mvi->pdev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	/* unlink from the port's active-slot list and break the
	 * task<->slot association */
	list_del(&slot->list);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
}
1305
/*
 * Decode the error information record of a failed slot and map it to a
 * SAM/SAS status: SAS_QUEUE_FULL (plus a slot reset) on SLOT_BSY_ERR,
 * a SATA taskfile completion on TFILE_ERR, SAM_CHECK_COND otherwise.
 * Also hexdumps the first 16 bytes of the record for debugging.
 */
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	/* first/second dwords of the error info record (see enum
	 * error_info_rec / error_info_rec_2) */
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
	int stat = SAM_CHECK_COND;

	if (err_dw1 & SLOT_BSY_ERR) {
		stat = SAS_QUEUE_FULL;
		mvs_slot_reset(mvi, task, slot_idx);
	}
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		break;
	case SAS_PROTOCOL_SMP:
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (err_dw0 & TFILE_ERR)
			stat = mvs_sata_done(mvi, task, slot_idx, 1);
		break;
	default:
		break;
	}

	mvs_hexdump(16, (u8 *) slot->response, 0);
	return stat;
}
1336
/*
 * Complete the command described by RX descriptor @rx_desc: mark the
 * task done (unless already aborted), translate the hardware result
 * into task_status per protocol, tear down the slot, and invoke the
 * libsas completion callback.  @flags nonzero forces the error path
 * (used by mvs_release_task).  Returns the SAM/SAS status, or -1 when
 * there was nothing to complete.
 *
 * Called with mvi->lock held; the lock is dropped around task_done()
 * to avoid recursing into the LLDD with it held.
 */
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat;
	struct mvs_port *port;
	bool aborted;
	void *to;

	if (unlikely(!task || !task->lldd_task))
		return -1;

	mvs_hba_cq_dump(mvi);

	/* claim the task: flip PENDING/AT_INITIATOR to DONE unless an
	 * abort already won the race */
	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);

	if (aborted) {
		/* aborter owns the task; just reclaim our resources */
		mvs_slot_task_free(mvi, task, slot, slot_idx);
		mvs_slot_free(mvi, rx_desc);
		return -1;
	}

	port = slot->port;
	tstat = &task->task_status;
	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(!port->port_attached || flags)) {
		mvs_slot_err(mvi, task, slot_idx);
		if (!sas_protocol_ata(task->task_proto))
			tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}

		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			/* copy the SMP response out of the status buffer
			 * into the caller's response scatterlist page */
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	/* keep the slot reserved while a SLOT_RESET is in flight */
	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
		mvs_slot_free(mvi, rx_desc);

	spin_unlock(&mvi->lock);
	task->task_done(task);
	spin_lock(&mvi->lock);
	return tstat->stat;
}
1437
1438static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1439{
1440 struct list_head *pos, *n;
1441 struct mvs_slot_info *slot;
1442 struct mvs_phy *phy = &mvi->phy[phy_no];
1443 struct mvs_port *port = phy->port;
1444 u32 rx_desc;
1445
1446 if (!port)
1447 return;
1448
1449 list_for_each_safe(pos, n, &port->list) {
1450 slot = container_of(pos, struct mvs_slot_info, list);
1451 rx_desc = (u32) (slot - mvi->slot_info);
1452 mvs_slot_complete(mvi, rx_desc, 1);
1453 }
1454}
1455
/*
 * Full interrupt service: drain the RX completion ring, dispatch
 * per-port events for every phy whose bit is set in the central
 * interrupt status, handle SATA SRS events, then acknowledge the
 * status register.  Caller must hold mvi->lock.
 */
static void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(INT_STAT);

	mvs_int_rx(mvi, false);

	for (i = 0; i < MVS_MAX_PORTS; i++) {
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	/* write-back acknowledges the handled events */
	mw32(INT_STAT, stat);
}
1477
/*
 * Drain the RX (completion) ring: walk descriptors between our cached
 * consumer index and the hardware's producer index, completing DONE
 * and ERR descriptors and freeing SLOT_RESET ones.  When @self_clear
 * is set and an ATTN descriptor was seen, re-run the full interrupt
 * handler to pick up the attention condition.  Always returns 0.
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {

		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);

		/* +1 skips the producer-index mirror at rx[0] */
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			/* complete errored descriptors not already
			 * handled via RXQ_DONE above */
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
				rx_desc);
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
1535
#ifdef MVS_USE_TASKLET
/*
 * Deferred interrupt work scheduled from the hard IRQ handlers: run
 * either the full handler (legacy INTx) or just the RX drain (MSI)
 * under mvi->lock with local interrupts disabled.
 */
static void mvs_tasklet(unsigned long data)
{
	struct mvs_info *mvi = (struct mvs_info *) data;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);

#ifdef MVS_DISABLE_MSI
	mvs_int_full(mvi);
#else
	mvs_int_rx(mvi, true);
#endif
	spin_unlock_irqrestore(&mvi->lock, flags);
}
#endif
1552
/*
 * Legacy (INTx) interrupt handler: check the global interrupt status
 * (0 or all-ones means not ours / device gone), ack command-complete
 * immediately, then either run the full handler inline or defer to
 * the tasklet depending on MVS_USE_TASKLET.
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;
	void __iomem *regs = mvi->regs;
	u32 stat;

	stat = mr32(GBL_INT_STAT);

	/* not our interrupt, or the device has been removed/hot-unplugged */
	if (stat == 0 || stat == 0xffffffff)
		return IRQ_NONE;

	/* clear CMD_CMPLT ASAP */
	mw32_f(INT_STAT, CINT_DONE);

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_full(mvi);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}
1578
1579#ifndef MVS_DISABLE_MSI
1580static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1581{
1582 struct mvs_info *mvi = opaque;
1583
1584#ifndef MVS_USE_TASKLET
1585 spin_lock(&mvi->lock);
1586
1587 mvs_int_rx(mvi, true);
1588
1589 spin_unlock(&mvi->lock);
1590#else
1591 tasklet_schedule(&mvi->tasklet);
1592#endif
1593 return IRQ_HANDLED;
1594}
1595#endif
1596
/* Per-command context handed to the mvs_task_prep_{smp,ata,ssp} helpers. */
struct mvs_task_exec_info {
	struct sas_task *task;		/* libsas task being prepared */
	struct mvs_cmd_hdr *hdr;	/* hardware command slot header */
	struct mvs_port *port;		/* port the command is issued on */
	u32 tag;			/* allocated slot tag */
	int n_elem;			/* number of mapped s/g elements */
};
1604
/*
 * Prepare an SMP command in slot tei->tag: DMA-map the request and
 * response frames, lay out the slot DMA buffer (open address frame,
 * PRD table, status buffer), fill the TX ring entry and the command
 * slot header.
 *
 * Returns 0 on success, -ENOMEM on a mapping failure, -EINVAL when the
 * request/response lengths are not dword-aligned; mappings are undone
 * on the error paths.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* dump builds bounce the request through the slot buffer so it can
	 * be snapshotted later; otherwise the mapped s/g address is used
	 * directly.
	 */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	/* request length excludes the 4-byte CRC; both lengths in dwords */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
1730
1731static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1732{
1733 void __iomem *regs = mvi->regs;
1734 u32 tmp, offs;
1735 u8 *tfs = &port->taskfileset;
1736
1737 if (*tfs == MVS_ID_NOT_MAPPED)
1738 return;
1739
1740 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1741 if (*tfs < 16) {
1742 tmp = mr32(PCS);
1743 mw32(PCS, tmp & ~offs);
1744 } else {
1745 tmp = mr32(CTL);
1746 mw32(CTL, tmp & ~offs);
1747 }
1748
1749 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1750 if (tmp)
1751 mw32(INT_STAT_SRS, tmp);
1752
1753 *tfs = MVS_ID_NOT_MAPPED;
1754}
1755
/*
 * Assign a free SATA register set (taskfile set) to @port.
 *
 * Scans the enable bitmaps (sets 0-15 in PCS, 16+ in CTL), claims the
 * first free set, acks any stale SRS interrupt for it, and records the
 * index in port->taskfileset.  Returns 0 on success (or if the port
 * already holds a set), MVS_ID_NOT_MAPPED when all sets are busy.
 */
static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		/* sets 16+ live in CTL; switch registers exactly once */
		if (i == 16)
			tmp = mr32(CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			/* clear any leftover SRS interrupt for this set */
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
1786
1787static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
1788{
1789 struct ata_queued_cmd *qc = task->uldd_task;
1790
1791 if (qc) {
1792 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1793 qc->tf.command == ATA_CMD_FPDMA_READ) {
1794 *tag = qc->tag;
1795 return 1;
1796 }
1797 }
1798
1799 return 0;
1800}
1801
1802static int mvs_task_prep_ata(struct mvs_info *mvi,
1803 struct mvs_task_exec_info *tei)
1804{
1805 struct sas_task *task = tei->task;
1806 struct domain_device *dev = task->dev;
1807 struct mvs_cmd_hdr *hdr = tei->hdr;
1808 struct asd_sas_port *sas_port = dev->port;
1809 struct mvs_slot_info *slot;
1810 struct scatterlist *sg;
1811 struct mvs_prd *buf_prd;
1812 struct mvs_port *port = tei->port;
1813 u32 tag = tei->tag;
1814 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1815 void *buf_tmp;
1816 u8 *buf_cmd, *buf_oaf;
1817 dma_addr_t buf_tmp_dma;
1818 u32 i, req_len, resp_len;
1819 const u32 max_resp_len = SB_RFB_MAX;
1820
1821 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1822 return -EBUSY;
1823
1824 slot = &mvi->slot_info[tag];
1825 slot->tx = mvi->tx_prod;
1826 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1827 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1828 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1829 (port->taskfileset << TXQ_SRS_SHIFT));
1830
1831 if (task->ata_task.use_ncq)
1832 flags |= MCH_FPDMA;
1833 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1834 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1835 flags |= MCH_ATAPI;
1836 }
1837
1838 /* FIXME: fill in port multiplier number */
1839
1840 hdr->flags = cpu_to_le32(flags);
1841
1842 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
1843 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
1844 task->ata_task.fis.sector_count |= hdr->tags << 3;
1845 else
1846 hdr->tags = cpu_to_le32(tag);
1847 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1848
1849 /*
1850 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1851 */
1852
1853 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1854 buf_cmd = buf_tmp = slot->buf;
1855 buf_tmp_dma = slot->buf_dma;
1856
1857 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1858
1859 buf_tmp += MVS_ATA_CMD_SZ;
1860 buf_tmp_dma += MVS_ATA_CMD_SZ;
1861#if _MV_DUMP
1862 slot->cmd_size = MVS_ATA_CMD_SZ;
1863#endif
1864
1865 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1866 /* used for STP. unused for SATA? */
1867 buf_oaf = buf_tmp;
1868 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1869
1870 buf_tmp += MVS_OAF_SZ;
1871 buf_tmp_dma += MVS_OAF_SZ;
1872
1873 /* region 3: PRD table ********************************************* */
1874 buf_prd = buf_tmp;
1875 if (tei->n_elem)
1876 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1877 else
1878 hdr->prd_tbl = 0;
1879
1880 i = sizeof(struct mvs_prd) * tei->n_elem;
1881 buf_tmp += i;
1882 buf_tmp_dma += i;
1883
1884 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
1885 /* FIXME: probably unused, for SATA. kept here just in case
1886 * we get a STP/SATA error information record
1887 */
1888 slot->response = buf_tmp;
1889 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1890
1891 req_len = sizeof(struct host_to_dev_fis);
1892 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1893 sizeof(struct mvs_err_info) - i;
1894
1895 /* request, response lengths */
1896 resp_len = min(resp_len, max_resp_len);
1897 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1898
1899 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1900 /* fill in command FIS and ATAPI CDB */
1901 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1902 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1903 memcpy(buf_cmd + STP_ATAPI_CMD,
1904 task->ata_task.atapi_packet, 16);
1905
1906 /* generate open address frame hdr (first 12 bytes) */
1907 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1908 buf_oaf[1] = task->dev->linkrate & 0xf;
1909 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1910 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1911
1912 /* fill in PRD (scatter/gather) table, if any */
1913 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1914 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1915 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1916 buf_prd++;
1917 }
1918
1919 return 0;
1920}
1921
/*
 * Prepare an SSP command in slot tei->tag: fill the TX ring entry, lay
 * out the slot DMA buffer (SSP frame, open address frame, PRD table,
 * status buffer), and build the SSP frame header and command IU.
 *
 * Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u8 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port phy map when one has been formed */
	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
		task->dev->port->phy_mask;
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths (in dwords) */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	/* byte 9: first-burst flag, task attribute and priority */
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
2039
/*
 * libsas lldd_execute_task entry point: queue @num tasks (linked via
 * task->list) to the hardware.
 *
 * For each task: check the port is attached, DMA-map the scatterlist
 * (SMP/SSP only; ATA tasks arrive pre-mapped by libata), allocate a
 * slot tag, dispatch to the protocol-specific prep helper, and advance
 * the TX producer.  The hardware TX producer register is only written
 * once at the end, for however many commands were staged.
 *
 * Returns 0 on success, SAS_PHY_DOWN for an unattached ATA port, or a
 * negative errno; on error the current task's mappings and tag are
 * unwound (previously staged tasks are still issued).
 */
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);
	do {
		dev = t->dev;
		tei.port = &mvi->port[dev->port->id];

		if (!tei.port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				rc = SAS_PHY_DOWN;
				goto out_done;
			} else {
				/* non-ATA tasks are completed back to libsas
				 * as undelivered and the loop continues
				 */
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			}
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		slot = &mvi->slot_info[tag];
		t->lldd_task = NULL;
		slot->n_elem = n_elem;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		slot->task = t;
		slot->port = tei.port;
		t->lldd_task = (void *) slot;
		list_add_tail(&slot->list, &slot->port->list);
		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	rc = 0;
	goto out_done;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
out_done:
	/* kick the hardware once for everything staged in this call */
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}
2159
/*
 * libsas task-abort handler.
 *
 * If the task already completed, report TMF_RESP_FUNC_COMPLETE at once.
 * Otherwise free the slot still owned by the task (if any) and re-issue
 * the task via mvs_task_exec(); success of the re-issue is reported as
 * a completed abort, failure as TMF_RESP_FUNC_FAILED.
 */
static int mvs_task_abort(struct sas_task *task)
{
	int rc;
	unsigned long flags;
	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	int tag;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* protocol-specific diagnostics only; no per-protocol abort logic */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
		break;
	case SAS_PROTOCOL_SSP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
#if _MV_DUMP
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
		mvs_hexdump(sizeof(struct host_to_dev_fis),
				(void *)&task->ata_task.fis, 0);
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
#endif
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
			/* TODO */
			;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		break;
	}
	default:
		break;
	}

	/* reclaim the slot the aborted task still occupies, then retry it */
	if (mvs_find_tag(mvi, task, &tag)) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
		rc = TMF_RESP_FUNC_COMPLETE;
	else
		rc = TMF_RESP_FUNC_FAILED;
out_done:
	return rc;
}
2218
2219static void mvs_free(struct mvs_info *mvi)
2220{
2221 int i;
2222
2223 if (!mvi)
2224 return;
2225
2226 for (i = 0; i < MVS_SLOTS; i++) {
2227 struct mvs_slot_info *slot = &mvi->slot_info[i];
2228
2229 if (slot->buf)
2230 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
2231 slot->buf, slot->buf_dma);
2232 }
2233
2234 if (mvi->tx)
2235 dma_free_coherent(&mvi->pdev->dev,
2236 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2237 mvi->tx, mvi->tx_dma);
2238 if (mvi->rx_fis)
2239 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2240 mvi->rx_fis, mvi->rx_fis_dma);
2241 if (mvi->rx)
2242 dma_free_coherent(&mvi->pdev->dev,
2243 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2244 mvi->rx, mvi->rx_dma);
2245 if (mvi->slot)
2246 dma_free_coherent(&mvi->pdev->dev,
2247 sizeof(*mvi->slot) * MVS_SLOTS,
2248 mvi->slot, mvi->slot_dma);
2249#ifdef MVS_ENABLE_PERI
2250 if (mvi->peri_regs)
2251 iounmap(mvi->peri_regs);
2252#endif
2253 if (mvi->regs)
2254 iounmap(mvi->regs);
2255 if (mvi->shost)
2256 scsi_host_put(mvi->shost);
2257 kfree(mvi->sas.sas_port);
2258 kfree(mvi->sas.sas_phy);
2259 kfree(mvi);
2260}
2261
2262/* FIXME: locking? */
2263static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2264 void *funcdata)
2265{
2266 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2267 int rc = 0, phy_id = sas_phy->id;
2268 u32 tmp;
2269
2270 tmp = mvs_read_phy_ctl(mvi, phy_id);
2271
2272 switch (func) {
2273 case PHY_FUNC_SET_LINK_RATE:{
2274 struct sas_phy_linkrates *rates = funcdata;
2275 u32 lrmin = 0, lrmax = 0;
2276
2277 lrmin = (rates->minimum_linkrate << 8);
2278 lrmax = (rates->maximum_linkrate << 12);
2279
2280 if (lrmin) {
2281 tmp &= ~(0xf << 8);
2282 tmp |= lrmin;
2283 }
2284 if (lrmax) {
2285 tmp &= ~(0xf << 12);
2286 tmp |= lrmax;
2287 }
2288 mvs_write_phy_ctl(mvi, phy_id, tmp);
2289 break;
2290 }
2291
2292 case PHY_FUNC_HARD_RESET:
2293 if (tmp & PHY_RST_HARD)
2294 break;
2295 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2296 break;
2297
2298 case PHY_FUNC_LINK_RESET:
2299 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2300 break;
2301
2302 case PHY_FUNC_DISABLE:
2303 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2304 default:
2305 rc = -EOPNOTSUPP;
2306 }
2307
2308 return rc;
2309}
2310
2311static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2312{
2313 struct mvs_phy *phy = &mvi->phy[phy_id];
2314 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2315
2316 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2317 sas_phy->class = SAS;
2318 sas_phy->iproto = SAS_PROTOCOL_ALL;
2319 sas_phy->tproto = 0;
2320 sas_phy->type = PHY_TYPE_PHYSICAL;
2321 sas_phy->role = PHY_ROLE_INITIATOR;
2322 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2323 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2324
2325 sas_phy->id = phy_id;
2326 sas_phy->sas_addr = &mvi->sas_addr[0];
2327 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2328 sas_phy->ha = &mvi->sas;
2329 sas_phy->lldd_phy = phy;
2330}
2331
2332static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2333 const struct pci_device_id *ent)
2334{
2335 struct mvs_info *mvi;
2336 unsigned long res_start, res_len, res_flag;
2337 struct asd_sas_phy **arr_phy;
2338 struct asd_sas_port **arr_port;
2339 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2340 int i;
2341
2342 /*
2343 * alloc and init our per-HBA mvs_info struct
2344 */
2345
2346 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2347 if (!mvi)
2348 return NULL;
2349
2350 spin_lock_init(&mvi->lock);
2351#ifdef MVS_USE_TASKLET
2352 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
2353#endif
2354 mvi->pdev = pdev;
2355 mvi->chip = chip;
2356
2357 if (pdev->device == 0x6440 && pdev->revision == 0)
2358 mvi->flags |= MVF_PHY_PWR_FIX;
2359
2360 /*
2361 * alloc and init SCSI, SAS glue
2362 */
2363
2364 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2365 if (!mvi->shost)
2366 goto err_out;
2367
2368 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2369 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2370 if (!arr_phy || !arr_port)
2371 goto err_out;
2372
2373 for (i = 0; i < MVS_MAX_PHYS; i++) {
2374 mvs_phy_init(mvi, i);
2375 arr_phy[i] = &mvi->phy[i].sas_phy;
2376 arr_port[i] = &mvi->port[i].sas_port;
2377 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
2378 mvi->port[i].wide_port_phymap = 0;
2379 mvi->port[i].port_attached = 0;
2380 INIT_LIST_HEAD(&mvi->port[i].list);
2381 }
2382
2383 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2384 mvi->shost->transportt = mvs_stt;
2385 mvi->shost->max_id = 21;
2386 mvi->shost->max_lun = ~0;
2387 mvi->shost->max_channel = 0;
2388 mvi->shost->max_cmd_len = 16;
2389
2390 mvi->sas.sas_ha_name = DRV_NAME;
2391 mvi->sas.dev = &pdev->dev;
2392 mvi->sas.lldd_module = THIS_MODULE;
2393 mvi->sas.sas_addr = &mvi->sas_addr[0];
2394 mvi->sas.sas_phy = arr_phy;
2395 mvi->sas.sas_port = arr_port;
2396 mvi->sas.num_phys = chip->n_phy;
2397 mvi->sas.lldd_max_execute_num = 1;
2398 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2399 mvi->shost->can_queue = MVS_CAN_QUEUE;
2400 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
2401 mvi->sas.lldd_ha = mvi;
2402 mvi->sas.core.shost = mvi->shost;
2403
2404 mvs_tag_init(mvi);
2405
2406 /*
2407 * ioremap main and peripheral registers
2408 */
2409
2410#ifdef MVS_ENABLE_PERI
2411 res_start = pci_resource_start(pdev, 2);
2412 res_len = pci_resource_len(pdev, 2);
2413 if (!res_start || !res_len)
2414 goto err_out;
2415
2416 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2417 if (!mvi->peri_regs)
2418 goto err_out;
2419#endif
2420
2421 res_start = pci_resource_start(pdev, 4);
2422 res_len = pci_resource_len(pdev, 4);
2423 if (!res_start || !res_len)
2424 goto err_out;
2425
2426 res_flag = pci_resource_flags(pdev, 4);
2427 if (res_flag & IORESOURCE_CACHEABLE)
2428 mvi->regs = ioremap(res_start, res_len);
2429 else
2430 mvi->regs = ioremap_nocache(res_start, res_len);
2431
2432 if (!mvi->regs)
2433 goto err_out;
2434
2435 /*
2436 * alloc and init our DMA areas
2437 */
2438
2439 mvi->tx = dma_alloc_coherent(&pdev->dev,
2440 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2441 &mvi->tx_dma, GFP_KERNEL);
2442 if (!mvi->tx)
2443 goto err_out;
2444 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2445
2446 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2447 &mvi->rx_fis_dma, GFP_KERNEL);
2448 if (!mvi->rx_fis)
2449 goto err_out;
2450 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2451
2452 mvi->rx = dma_alloc_coherent(&pdev->dev,
2453 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2454 &mvi->rx_dma, GFP_KERNEL);
2455 if (!mvi->rx)
2456 goto err_out;
2457 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
2458
2459 mvi->rx[0] = cpu_to_le32(0xfff);
2460 mvi->rx_cons = 0xfff;
2461
2462 mvi->slot = dma_alloc_coherent(&pdev->dev,
2463 sizeof(*mvi->slot) * MVS_SLOTS,
2464 &mvi->slot_dma, GFP_KERNEL);
2465 if (!mvi->slot)
2466 goto err_out;
2467 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2468
2469 for (i = 0; i < MVS_SLOTS; i++) {
2470 struct mvs_slot_info *slot = &mvi->slot_info[i];
2471
2472 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2473 &slot->buf_dma, GFP_KERNEL);
2474 if (!slot->buf)
2475 goto err_out;
2476 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2477 }
2478
2479 /* finally, read NVRAM to get our SAS address */
2480 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2481 goto err_out;
2482 return mvi;
2483
2484err_out:
2485 mvs_free(mvi);
2486 return NULL;
2487}
2488
/* Read an indirect "command" register: write the address to CMD_ADDR,
 * then read the value back through CMD_DATA.  The two-step order is
 * the hardware protocol and must not be reordered.
 */
static u32 mvs_cr32(void __iomem *regs, u32 addr)
{
	mw32(CMD_ADDR, addr);
	return mr32(CMD_DATA);
}
2494
/* Write an indirect "command" register: address to CMD_ADDR, then the
 * value through CMD_DATA (order mandated by the hardware).
 */
static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}
2500
2501static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2502{
2503 void __iomem *regs = mvi->regs;
2504 return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
2505 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2506}
2507
2508static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2509{
2510 void __iomem *regs = mvi->regs;
2511 if (port < 4)
2512 mw32(P0_SER_CTLSTAT + port * 4, val);
2513 else
2514 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2515}
2516
2517static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2518{
2519 void __iomem *regs = mvi->regs + off;
2520 void __iomem *regs2 = mvi->regs + off2;
2521 return (port < 4)?readl(regs + port * 8):
2522 readl(regs2 + (port - 4) * 8);
2523}
2524
2525static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2526 u32 port, u32 val)
2527{
2528 void __iomem *regs = mvi->regs + off;
2529 void __iomem *regs2 = mvi->regs + off2;
2530 if (port < 4)
2531 writel(val, regs + port * 8);
2532 else
2533 writel(val, regs2 + (port - 4) * 8);
2534}
2535
/* Thin per-phy accessors for the PHY config, vendor-specific (VSR) and
 * interrupt registers, all dispatched through mvs_read_port()/
 * mvs_write_port() (phys 0-3 and 4+ use different register banks).
 * The *_addr writers select the indirect register that the matching
 * *_data accessor then reads or writes.
 */
static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}

static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}

static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}

static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}

static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}

static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}

static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}

static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}

static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}

static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}
2585
/*
 * One-time phy/link-layer register tweaks and errata workarounds,
 * applied at init time.  The magic values below are vendor-supplied;
 * their bit-level meaning is not documented here — do not "clean up"
 * without hardware documentation.
 */
static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels */
	/* NOTE(review): VSR address 0x8 / value 0x2F0 presumably select a
	 * drive-strength/voltage setting, repeated per phy at stride 8 —
	 * confirm against the 88SE64xx datasheet.
	 */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);

	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);

	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);

	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);

}
2638
2639static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2640{
2641 void __iomem *regs = mvi->regs;
2642 u32 tmp;
2643
2644 tmp = mr32(PCS);
2645 if (mvi->chip->n_phy <= 4)
2646 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2647 else
2648 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2649 mw32(PCS, tmp);
2650}
2651
/*
 * Classify phy @i as SAS or SATA from the global port-type register.
 *
 * NOTE(review): the chained mask `reg & MODE_SAS_SATA & (1 << i)` only
 * behaves per-phy if MODE_SAS_SATA covers all per-phy bits — looks
 * intentional, but confirm against the register definition.
 */
static void mvs_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	/* TODO check & save device type */
	reg = mr32(GBL_PORT_TYPE);

	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}
2666
/*
 * Copy the four SATA signature (D2H register FIS) dwords of phy @i into
 * @buf (s[0]..s[3] = SIG0..SIG3) and return @buf; NULL in, NULL out.
 * Registers are read SIG3-first — keep the order, the hardware access
 * sequence may matter.
 */
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = mvs_read_port_cfg_data(mvi, i);

	return (void *)s;
}
2688
/* Nonzero iff the signature-FIS-received event bit is set in @irq_status. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
2693
/*
 * Push the wide-port membership of phy @i's port down to the hardware:
 * phys that are members of the wide port get the full phy map written
 * to their PHYR_WIDE_PORT register, non-members get 0.
 *
 * NOTE(review): the roles of 'no' and 'j' here depend on the
 * for_each_phy() macro expansion ('no' is tested with '& 1' but also
 * used as a phy number in the register writes) - confirm against the
 * macro definition.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
2710
2711static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2712{
2713 u32 tmp;
2714 struct mvs_phy *phy = &mvi->phy[i];
2715 struct mvs_port *port = phy->port;;
2716
2717 tmp = mvs_read_phy_ctl(mvi, i);
2718
2719 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2720 if (!port)
2721 phy->phy_attached = 1;
2722 return tmp;
2723 }
2724
2725 if (port) {
2726 if (phy->phy_type & PORT_TYPE_SAS) {
2727 port->wide_port_phymap &= ~(1U << i);
2728 if (!port->wide_port_phymap)
2729 port->port_attached = 0;
2730 mvs_update_wideport(mvi, i);
2731 } else if (phy->phy_type & PORT_TYPE_SATA)
2732 port->port_attached = 0;
2733 mvs_free_reg_set(mvi, phy->port);
2734 phy->port = NULL;
2735 phy->phy_attached = 0;
2736 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2737 }
2738 return 0;
2739}
2740
/*
 * Refresh cached identity and link information for phy @i from the
 * per-phy config registers.  When @get_st is set, also latch the phy
 * interrupt status and re-evaluate link readiness (and acknowledge the
 * latched events on exit).
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
					int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp;
	u64 tmp64;

	/* local identify frame data and SAS address (hi:lo halves) */
	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* negotiated/min/max link rates come packed in phy_status */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
		phy->minimum_linkrate =
			(phy->phy_status &
				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
		phy->maximum_linkrate =
			(phy->phy_status &
				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* Updated attached_sas_addr */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
			phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
				sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				/* placeholder address until libsas assigns one */
				phy->att_dev_sas_addr = i; /* temp */
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				/* no signature FIS yet: give up on SATA for now */
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
				phy->phy_type &= ~(PORT_TYPE_SATA);
				goto out_done;
			}
		}
		/* libsas expects the attached address in big-endian bytes */
		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i,
			(unsigned long long)phy->att_dev_sas_addr,
			(unsigned long long)phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_LATECLK;
		else
			tmp |= PHY_MODE6_LATECLK;
		mvs_write_port_vsr_data(mvi, i, tmp);

	}
out_done:
	/* acknowledge the events we latched above */
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
2848
2849static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2850{
2851 struct sas_ha_struct *sas_ha = sas_phy->ha;
2852 struct mvs_info *mvi = sas_ha->lldd_ha;
2853 struct asd_sas_port *sas_port = sas_phy->port;
2854 struct mvs_phy *phy = sas_phy->lldd_phy;
2855 struct mvs_port *port = &mvi->port[sas_port->id];
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&mvi->lock, flags);
2859 port->port_attached = 1;
2860 phy->port = port;
2861 port->taskfileset = MVS_ID_NOT_MAPPED;
2862 if (phy->phy_type & PORT_TYPE_SAS) {
2863 port->wide_port_phymap = sas_port->phy_mask;
2864 mvs_update_wideport(mvi, sas_phy->id);
2865 }
2866 spin_unlock_irqrestore(&mvi->lock, flags);
2867}
2868
/* I_T nexus reset is not implemented for this hardware; always report
 * failure so libsas falls back to other recovery strategies. */
static int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	return TMF_RESP_FUNC_FAILED;
}
2873
/*
 * One-time controller bring-up: reset the HBA, program DMA ring
 * addresses, initialize and reset all phys, set endianness controls,
 * and enable the transmit/receive engines and completion interrupts.
 *
 * Returns 0 on success, -EBUSY if the HBA never comes out of reset.
 * The ordering of register accesses below follows the hardware init
 * sequence and must not be rearranged.
 */
static int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* power the phys down before the global reset */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	/* power the phys back on */
	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	/* release chip-level reset */
	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	/* program the DMA ring base addresses (64-bit split hi/lo) */
	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(1100);
	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* sas_addr is big-endian; split into hi/lo 32-bit words */
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);
	/* interrupt coalescing may cause missing HW interrput in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
	mw32(INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(INT_MASK_SRS, 0xFF);
	return 0;
}
3043
3044static void __devinit mvs_print_info(struct mvs_info *mvi)
3045{
3046 struct pci_dev *pdev = mvi->pdev;
3047 static int printed_version;
3048
3049 if (!printed_version++)
3050 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3051
3052 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
3054}
3055
3056static int __devinit mvs_pci_init(struct pci_dev *pdev,
3057 const struct pci_device_id *ent)
3058{
3059 int rc;
3060 struct mvs_info *mvi;
3061 irq_handler_t irq_handler = mvs_interrupt;
3062
3063 rc = pci_enable_device(pdev);
3064 if (rc)
3065 return rc;
3066
3067 pci_set_master(pdev);
3068
3069 rc = pci_request_regions(pdev, DRV_NAME);
3070 if (rc)
3071 goto err_out_disable;
3072
3073 rc = pci_go_64(pdev);
3074 if (rc)
3075 goto err_out_regions;
3076
3077 mvi = mvs_alloc(pdev, ent);
3078 if (!mvi) {
3079 rc = -ENOMEM;
3080 goto err_out_regions;
3081 }
3082
3083 rc = mvs_hw_init(mvi);
3084 if (rc)
3085 goto err_out_mvi;
3086
3087#ifndef MVS_DISABLE_MSI
3088 if (!pci_enable_msi(pdev)) {
3089 u32 tmp;
3090 void __iomem *regs = mvi->regs;
3091 mvi->flags |= MVF_MSI;
3092 irq_handler = mvs_msi_interrupt;
3093 tmp = mr32(PCS);
3094 mw32(PCS, tmp | PCS_SELF_CLEAR);
3095 }
3096#endif
3097
3098 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
3099 if (rc)
3100 goto err_out_msi;
3101
3102 rc = scsi_add_host(mvi->shost, &pdev->dev);
3103 if (rc)
3104 goto err_out_irq;
3105
3106 rc = sas_register_ha(&mvi->sas);
3107 if (rc)
3108 goto err_out_shost;
3109
3110 pci_set_drvdata(pdev, mvi);
3111
3112 mvs_print_info(mvi);
3113
3114 mvs_hba_interrupt_enable(mvi);
3115
3116 scsi_scan_host(mvi->shost);
3117
3118 return 0;
3119
3120err_out_shost:
3121 scsi_remove_host(mvi->shost);
3122err_out_irq:
3123 free_irq(pdev->irq, mvi);
3124err_out_msi:
3125 if (mvi->flags |= MVF_MSI)
3126 pci_disable_msi(pdev);
3127err_out_mvi:
3128 mvs_free(mvi);
3129err_out_regions:
3130 pci_release_regions(pdev);
3131err_out_disable:
3132 pci_disable_device(pdev);
3133 return rc;
3134}
3135
/*
 * PCI remove: tear down in reverse order of mvs_pci_init().
 * Unregister from libsas, mask controller interrupts, remove the SCSI
 * host, release IRQ/MSI, free driver memory, and release PCI
 * resources.  Tolerates NULL drvdata in case probe failed early.
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	struct mvs_info *mvi = pci_get_drvdata(pdev);

	/* clear drvdata first so nothing else finds a dying device */
	pci_set_drvdata(pdev, NULL);

	if (mvi) {
		sas_unregister_ha(&mvi->sas);
		mvs_hba_interrupt_disable(mvi);
		sas_remove_host(mvi->shost);
		scsi_remove_host(mvi->shost);

		free_irq(pdev->irq, mvi);
		if (mvi->flags & MVF_MSI)
			pci_disable_msi(pdev);
		mvs_free(mvi);
		pci_release_regions(pdev);
	}
	pci_disable_device(pdev);
}
3156
/* libsas LLDD entry points implemented by this driver */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task = mvs_task_exec,
	.lldd_control_phy = mvs_phy_control,
	.lldd_abort_task = mvs_task_abort,
	.lldd_port_formed = mvs_port_formed,
	.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
};
3164
/*
 * Supported Marvell SAS/SATA controllers.  Note device 0x6340 is
 * driven as a 6440, and the 0x6440/subdevice-0x6480 combination is
 * matched before the generic 0x6440 entry so it binds as a 6480.
 */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		/* 6440 with 6480 subsystem ID: treat as 6480 */
		.vendor 	= PCI_VENDOR_ID_MARVELL,
		.device 	= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6480,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
3182
/* PCI driver glue: probe/remove for the devices in mvs_pci_table */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
3189
3190static int __init mvs_init(void)
3191{
3192 int rc;
3193
3194 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
3195 if (!mvs_stt)
3196 return -ENOMEM;
3197
3198 rc = pci_register_driver(&mvs_pci_driver);
3199 if (rc)
3200 goto err_out;
3201
3202 return 0;
3203
3204err_out:
3205 sas_release_transport(mvs_stt);
3206 return rc;
3207}
3208
/* Module exit: unregister the PCI driver, then release the transport. */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
3214
3215module_init(mvs_init);
3216module_exit(mvs_exit);
3217
3218MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
3219MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
3220MODULE_VERSION(DRV_VERSION);
3221MODULE_LICENSE("GPL");
3222MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 000000000000..6de7af27e507
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
1#
2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the 88SE64XX/88SE94XX driver.
10#
11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 select FW_LOADER
32 help
33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34 PCI-E 88SE94XX chip based host adapters.
35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 000000000000..52ac4264677d
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
29mvsas-y += mv_init.o \
30 mv_sas.o \
31 mv_64xx.o \
32 mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 000000000000..10a5077b6aed
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
1/*
2 * Marvell 88SE64xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_64xx.h"
27#include "mv_chips.h"
28
29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 void __iomem *regs = mvi->regs;
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34
35 /* TODO check & save device type */
36 reg = mr32(MVS_GBL_PORT_TYPE);
37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
38 if (reg & MODE_SAS_SATA & (1 << i))
39 phy->phy_type |= PORT_TYPE_SAS;
40 else
41 phy->phy_type |= PORT_TYPE_SATA;
42}
43
44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
45{
46 void __iomem *regs = mvi->regs;
47 u32 tmp;
48
49 tmp = mr32(MVS_PCS);
50 if (mvi->chip->n_phy <= 4)
51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
/*
 * Apply 64xx-specific phy tuning on top of the common mvs_phy_hacks().
 * PCI parts get analog voltage-level adjustments; SoC parts get auto
 * port detection disabled plus per-phy VSR mode-register settings.
 * The raw values below come from vendor tuning - do not change them.
 */
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;

	mvs_phy_hacks(mvi);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* TEST - for phy decoding error, adjust voltage levels */
		/* the +0/+8/+16/+24 offsets address successive phys -
		 * presumably an 8-byte register stride; confirm against
		 * the chip register map */
		mw32(MVS_P0_VSR_ADDR + 0, 0x8);
		mw32(MVS_P0_VSR_DATA + 0, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 8, 0x8);
		mw32(MVS_P0_VSR_DATA + 8, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 16, 0x8);
		mw32(MVS_P0_VSR_DATA + 16, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 24, 0x8);
		mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
	} else {
		int i;
		/* disable auto port detection */
		mw32(MVS_GBL_PORT_TYPE, 0);
		for (i = 0; i < mvi->chip->n_phy; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
			mvs_write_port_vsr_data(mvi, i, 0x90000000);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
			mvs_write_port_vsr_data(mvi, i, 0x50f2);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
			mvs_write_port_vsr_data(mvi, i, 0x0e);
		}
	}
}
90
/*
 * Pulse the STP link-reset bit for phy @phy_id: set the bit, hold it
 * for 10 ms, then restore the original register value.  On PCI parts
 * the control lives in PCI config space (PCR_PHY_CTL covers phys 0-3,
 * PCR_PHY_CTL2 phys 4+); on SoC parts it is a memory-mapped register.
 */
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 reg, tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < 4)
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
		else
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);

	} else
		reg = mr32(MVS_PHY_CTL);

	/* set only this phy's link-reset bit on top of the saved value */
	tmp = reg;
	if (phy_id < 4)
		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
	else
		tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < 4) {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
		} else {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
		}
	} else {
		mw32(MVS_PHY_CTL, tmp);
		mdelay(10);
		mw32(MVS_PHY_CTL, reg);
	}
}
127
/*
 * Reset phy @phy_id.  First acknowledge any pending ready-change event
 * so the reset itself does not trigger a spurious notification, then
 * request a hard or soft reset via the phy control register.  A hard
 * reset is polled until the self-clearing PHY_RST_HARD bit drops.
 *
 * NOTE(review): the hard-reset poll has no timeout - if the bit never
 * self-clears this spins forever; consider a bounded wait.
 */
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	tmp = mvs_read_phy_ctl(mvi, phy_id);
	if (hard)
		tmp |= PHY_RST_HARD;
	else
		tmp |= PHY_RST;
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	if (hard) {
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	}
}
146
/*
 * Global controller reset for PCI-attached 64xx parts: mask
 * interrupts, apply the phy power workaround on affected revisions,
 * assert HBA_RST, and poll (up to ~10 s) for the self-clearing reset
 * bit to drop.  Returns 0 on success, -EBUSY on timeout.
 *
 * NOTE(review): the mask/read/HBA_RST-test sequence appears twice
 * (before and after the power workaround) - possibly deliberate
 * paranoia around the config-space writes, but worth confirming.
 */
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	int i;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* phy power workaround for revision-0 silicon */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}
	}

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(MVS_GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(MVS_GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
		return -EBUSY;
	}
	return 0;
}
196
197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
198{
199 void __iomem *regs = mvi->regs;
200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
240
/*
 * 64xx controller bring-up: reset the chip (PCI) or cycle phy power
 * (SoC), program DMA ring addresses, initialize and reset all phys,
 * set endianness controls, and start the TX/RX engines and completion
 * interrupts.  Register ordering follows the hardware init sequence
 * and must not be rearranged.  Returns 0 or a negative errno.
 */
static int __devinit mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* revision-0 silicon needs the phy power workaround */
	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		/* NOTE(review): asymmetric with the PCR_PHY_CTL path above,
		 * which uses "&= ~PCTL_PWR_OFF"; the missing '~' here keeps
		 * only the PWR_OFF bit instead of clearing it - looks like a
		 * typo, but verify against hardware docs before changing */
		tmp &= PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);	/* MVS_PCS */
	/* init phys */
	mvs_64xx_phy_hacks(mvi);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);

	/* DMA ring base addresses, split into 32-bit hi/lo halves */
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* should set little endian SAS address to 64xx chip */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, 1);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/*
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	mw32(MVS_PCS, tmp);
	/* interrupt coalescing may cause missing HW interrput in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(MVS_INT_COAL, tmp);

	tmp = 0x100;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
406
/* Map the 64xx register BARs (BAR 4 main, BAR 2 extra).
 * Returns 0 on success, -1 on failure. */
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	return mvs_ioremap(mvi, 4, 2) ? -1 : 0;
}
413
/* Unmap both register windows mapped by mvs_64xx_ioremap(). */
static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
	mvs_iounmap(mvi->regs);
	mvs_iounmap(mvi->regs_ex);
}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
421{
422 void __iomem *regs = mvi->regs;
423 u32 tmp;
424
425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
428
429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
436}
437
438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
439{
440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
/*
 * Interrupt service routine: acknowledge the completion interrupt as
 * early as possible, then run the full interrupt handler.  When
 * tasklet mode is compiled out, mvi->lock serializes against the
 * task-submission paths.
 */
static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	/* clear CMD_CMPLT ASAP */
	mw32_f(MVS_INT_STAT, CINT_DONE);
#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);
#endif
	mvs_int_full(mvi);
#ifndef MVS_USE_TASKLET
	spin_unlock(&mvi->lock);
#endif
	return IRQ_HANDLED;
}
468
/*
 * Force slot @slot_idx out of the active state by writing its bit to
 * the two command-active register banks (offsets 0x40 and 0x00), then
 * busy-wait until the hardware clears the bit in each bank.
 *
 * NOTE(review): "slot_idx >> 3" as the byte offset with a
 * "slot_idx % 32" bit position implies 32 slots per 4-byte register -
 * confirm against the register map.  The polls are also unbounded;
 * a wedged chip would spin here forever.
 */
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;
	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
}
481
/*
 * Issue a command-issue stop.  For SATA ports the SATA register set
 * @tfs gets its SRS interrupt status bit raised first; then the
 * CI-stop interrupt is acknowledged and the stop bits (0xFF00) are set
 * in the PCS register.
 */
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (type == PORT_TYPE_SATA) {
		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
		mw32(MVS_INT_STAT_SRS_0, tmp);
	}
	mw32(MVS_INT_STAT, CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
496
/*
 * Release a SATA register set previously claimed by assign_reg_set.
 * Sets 0-15 are enabled via MVS_PCS, sets 16+ via MVS_CTL; the enable
 * bit position within either register is (*tfs & 0x0f) + shift.  Any
 * pending SRS interrupt for the set is acknowledged before the id is
 * marked unmapped.  No-op when *tfs is already MVS_ID_NOT_MAPPED.
 */
497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
498{
499	void __iomem *regs = mvi->regs;
500	u32 tmp, offs;
501
502	if (*tfs == MVS_ID_NOT_MAPPED)
503		return;
504
505	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
506	if (*tfs < 16) {
507		tmp = mr32(MVS_PCS);
508		mw32(MVS_PCS, tmp & ~offs);
509	} else {
510		tmp = mr32(MVS_CTL);
511		mw32(MVS_CTL, tmp & ~offs);
512	}
513
514	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
515	if (tmp)
516		mw32(MVS_INT_STAT_SRS_0, tmp);
517
518	*tfs = MVS_ID_NOT_MAPPED;
519	return;
520}
521
/*
 * Claim the first free SATA register set.  Scans the enable bits held
 * in MVS_PCS (sets 0-15) and MVS_CTL (sets 16+); on success stores the
 * set number in *tfs, sets its enable bit, acks any stale SRS interrupt
 * and returns 0.  Returns MVS_ID_NOT_MAPPED when every set is in use.
 * Returns 0 immediately if *tfs is already mapped.
 */
522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
523{
524	int i;
525	u32 tmp, offs;
526	void __iomem *regs = mvi->regs;
527
528	if (*tfs != MVS_ID_NOT_MAPPED)
529		return 0;
530
531	tmp = mr32(MVS_PCS);
532
533	for (i = 0; i < mvi->chip->srs_sz; i++) {
534		if (i == 16)
535			tmp = mr32(MVS_CTL);
536		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
537		if (!(tmp & offs)) {
538			*tfs = i;
539
540			if (i < 16)
541				mw32(MVS_PCS, tmp | offs);
542			else
543				mw32(MVS_CTL, tmp | offs);
544			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
545			if (tmp)
546				mw32(MVS_INT_STAT_SRS_0, tmp);
547			return 0;
548		}
549	}
550	return MVS_ID_NOT_MAPPED;
551}
552
/*
 * Fill the 64xx PRD (physical region descriptor) table from a DMA-mapped
 * scatterlist: one 64-bit address + 32-bit length entry per segment.
 * NOTE(review): not declared static although it is only referenced via
 * the dispatch table in this file — confirm no external declaration.
 */
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555	int i;
556	struct scatterlist *sg;
557	struct mvs_prd *buf_prd = prd;
558	for_each_sg(scatter, sg, nr, i) {
559		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561		buf_prd++;
562	}
563}
564
/*
 * Return 1 when OOB (out-of-band) signalling has been detected on phy i,
 * else 0.  Reads PHYR_PHY_STAT through the indirect port-config window.
 */
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567	u32 phy_st;
568	mvs_write_port_cfg_addr(mvi, i,
569			PHYR_PHY_STAT);
570	phy_st = mvs_read_port_cfg_data(mvi, i);
571	if (phy_st & PHY_OOB_DTCTD)
572		return 1;
573	return 0;
574}
575
/*
 * Populate phy link-rate and attached-device fields from the cached
 * phy_status word and the indirect port-config registers.  The
 * negotiated/min/max link rates are bit fields of phy->phy_status;
 * the attached SAS address is assembled from the HI/LO config words
 * and normalised with SAS_ADDR().  The id parameter is unused on 64xx
 * (kept for dispatch-table signature compatibility with 94xx).
 */
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577				struct sas_identify_frame *id)
578
579{
580	struct mvs_phy *phy = &mvi->phy[i];
581	struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583	sas_phy->linkrate =
584		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587	phy->minimum_linkrate =
588		(phy->phy_status &
589			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590	phy->maximum_linkrate =
591		(phy->phy_status &
592			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601	phy->att_dev_sas_addr =
602		(u64) mvs_read_port_cfg_data(mvi, i) << 32;
603	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
/*
 * Apply the 1.5G-drive phy decoding erratum workaround: toggle the
 * LATECLK bit in VSR_PHY_MODE6 depending on whether the negotiated
 * rate is 1.5 Gbps (clear it) or faster (set it).
 */
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610	u32 tmp;
611	struct mvs_phy *phy = &mvi->phy[i];
612	/* workaround for HW phy decoding error on 1.5g disk drive */
613	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614	tmp = mvs_read_port_vsr_data(mvi, i);
615	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616		PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617		SAS_LINK_RATE_1_5_GBPS)
618		tmp &= ~PHY_MODE6_LATECLK;
619	else
620		tmp |= PHY_MODE6_LATECLK;
621	mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
/*
 * Program min (bits 8-11) / max (bits 15-12) link-rate limits into the
 * phy control register, then hard-reset the phy so the new limits take
 * effect.  A zero rate in either field leaves that limit unchanged.
 */
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625			struct sas_phy_linkrates *rates)
626{
627	u32 lrmin = 0, lrmax = 0;
628	u32 tmp;
629
630	tmp = mvs_read_phy_ctl(mvi, phy_id);
631	lrmin = (rates->minimum_linkrate << 8);
632	lrmax = (rates->maximum_linkrate << 12);
633
634	if (lrmin) {
635		tmp &= ~(0xf << 8);
636		tmp |= lrmin;
637	}
638	if (lrmax) {
639		tmp &= ~(0xf << 12);
640		tmp |= lrmax;
641	}
642	mvs_write_phy_ctl(mvi, phy_id, tmp);
643	mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
/*
 * Clear all active command state by pulsing the upper halves of PCS and
 * CTL: each register is written with its upper 16 bits cleared, then the
 * original value is restored.
 */
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648	u32 tmp;
649	void __iomem *regs = mvi->regs;
650	tmp = mr32(MVS_PCS);
651	mw32(MVS_PCS, tmp & 0xFFFF);
652	mw32(MVS_PCS, tmp);
653	tmp = mr32(MVS_CTL);
654	mw32(MVS_CTL, tmp & 0xFFFF);
655	mw32(MVS_CTL, tmp);
656}
657
658
/* Read the SPI flash data register (in the extended register window). */
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661	void __iomem *regs = mvi->regs_ex;
662	return ior32(SPI_DATA_REG_64XX);
663}
664
/* Write the SPI flash data register (in the extended register window). */
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667	void __iomem *regs = mvi->regs_ex;
668	 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
/*
 * Build a 64xx SPI command word into *dwCmd:
 *   bits 31-24 = opcode, bit 23 = read flag, bit 22 = address-valid,
 *   bits 23-19 = transfer length, bits 17-0 = flash address.
 * addr == MV_MAX_U32 means "no address phase".  Always returns 0.
 */
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673					u32      *dwCmd,
674					u8       cmd,
675					u8       read,
676					u8       length,
677					u32      addr
678					)
679{
680	u32  dwTmp;
681
682	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683	if (read)
684		dwTmp |= 1U<<23;
685
686	if (addr != MV_MAX_U32) {
687		dwTmp |= 1U<<22;
688		dwTmp |= (addr & 0x0003FFFF);
689	}
690
691	*dwCmd = dwTmp;
692	return 0;
693}
694
695
/*
 * Issue a previously built SPI command: enable vendor access, load the
 * command, then set SPISTART.  The retry loop bound is 1, so the
 * sequence executes exactly once; the loop is kept as a placeholder.
 * Always returns 0 — completion is polled via spi_waitdataready().
 */
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698	void __iomem *regs = mvi->regs_ex;
699	int     retry;
700
701	for (retry = 0; retry < 1; retry++) {
702		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703		iow32(SPI_CMD_REG_64XX, cmd);
704		iow32(SPI_CTRL_REG_64XX,
705			SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706	}
707
708	return 0;
709}
710
/*
 * Poll for SPI command completion: SPISTART self-clears when done.
 * Sleeps 10 ms between polls, up to `timeout` iterations.
 * Returns 0 on completion, -1 on timeout.
 */
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713	void __iomem *regs = mvi->regs_ex;
714	u32   i, dwTmp;
715
716	for (i = 0; i < timeout; i++) {
717		dwTmp = ior32(SPI_CTRL_REG_64XX);
718		if (!(dwTmp & SPI_CTRL_SPISTART))
719			return 0;
720		msleep(10);
721	}
722
723	return -1;
724}
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
/*
 * Hotplug DMA workaround: pad every unused PRD entry (from index `from`
 * to MAX_SG_ENTRY) with the same dummy buffer so the hardware never
 * follows a stale descriptor.
 */
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729	int i;
730	struct mvs_prd *buf_prd = prd;
731	buf_prd	+= from;
732	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733		buf_prd->addr = cpu_to_le64(buf_dma);
734		buf_prd->len = cpu_to_le32(buf_len);
735		++buf_prd;
736	}
737}
738#endif
739
/*
 * 64xx hardware dispatch table.  Positional initializer — the entry
 * order must match the member order of struct mvs_dispatch exactly.
 * NOTE(review): designated initializers (.field = fn) would make this
 * robust against struct layout changes; kept positional as-is.
 */
740const struct mvs_dispatch mvs_64xx_dispatch = {
741	"mv64xx",
742	mvs_64xx_init,
743	NULL,
744	mvs_64xx_ioremap,
745	mvs_64xx_iounmap,
746	mvs_64xx_isr,
747	mvs_64xx_isr_status,
748	mvs_64xx_interrupt_enable,
749	mvs_64xx_interrupt_disable,
750	mvs_read_phy_ctl,
751	mvs_write_phy_ctl,
752	mvs_read_port_cfg_data,
753	mvs_write_port_cfg_data,
754	mvs_write_port_cfg_addr,
755	mvs_read_port_vsr_data,
756	mvs_write_port_vsr_data,
757	mvs_write_port_vsr_addr,
758	mvs_read_port_irq_stat,
759	mvs_write_port_irq_stat,
760	mvs_read_port_irq_mask,
761	mvs_write_port_irq_mask,
762	mvs_get_sas_addr,
763	mvs_64xx_command_active,
764	mvs_64xx_issue_stop,
765	mvs_start_delivery,
766	mvs_rx_update,
767	mvs_int_full,
768	mvs_64xx_assign_reg_set,
769	mvs_64xx_free_reg_set,
770	mvs_get_prd_size,
771	mvs_get_prd_count,
772	mvs_64xx_make_prd,
773	mvs_64xx_detect_porttype,
774	mvs_64xx_oob_done,
775	mvs_64xx_fix_phy_info,
776	mvs_64xx_phy_work_around,
777	mvs_64xx_phy_set_link_rate,
778	mvs_hw_max_link_rate,
779	mvs_64xx_phy_disable,
780	mvs_64xx_phy_enable,
781	mvs_64xx_phy_reset,
782	mvs_64xx_stp_reset,
783	mvs_64xx_clear_active_cmds,
784	mvs_64xx_spi_read_data,
785	mvs_64xx_spi_write_data,
786	mvs_64xx_spi_buildcmd,
787	mvs_64xx_spi_issuecmd,
788	mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790	mvs_64xx_fix_dma,
791#endif
792};
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000000..42e947d9795e
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
1/*
2 * Marvell 88SE64xx hardware specific head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS64XX_REG_H_
26#define _MVS64XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE		SAS_LINK_RATE_3_0_GBPS
31
32/* enhanced mode registers (BAR4) */
/* Offsets below are relative to the mapped enhanced-mode window (mvi->regs). */
33enum hw_registers {
34	MVS_GBL_CTL		= 0x04,  /* global control */
35	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
36	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
37
38	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
39	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
40
41	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
42
43	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
44	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
45	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
46	MVS_CMD_LIST_HI		= 0x10C,
47	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
48	MVS_RX_FIS_HI		= 0x114,
49
50	MVS_TX_CFG		= 0x120, /* TX configuration */
51	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
52	MVS_TX_HI		= 0x128,
53
54	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
55	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
56	MVS_RX_CFG		= 0x134, /* RX configuration */
57	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
58	MVS_RX_HI		= 0x13C,
59	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
60
61	MVS_INT_COAL		= 0x148, /* Int coalescing config */
62	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
63	MVS_INT_STAT		= 0x150, /* Central int status */
64	MVS_INT_MASK		= 0x154, /* Central int enable */
65	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
66	MVS_INT_MASK_SRS_0	= 0x15C,
67
68	/* ports 1-3 follow after this */
69	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
70	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
71	/* ports 5-7 follow after this */
72	MVS_P4_INT_STAT		= 0x200, /* Port4 interrupt status */
73	MVS_P4_INT_MASK		= 0x204, /* Port4 interrupt enable mask */
74
75	/* ports 1-3 follow after this */
76	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
77	/* ports 5-7 follow after this */
78	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
79
80	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
81	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
82
83	/* ports 1-3 follow after this */
84	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
85	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
86	/* ports 5-7 follow after this */
87	MVS_P4_CFG_ADDR		= 0x230, /* Port4 config address */
88	MVS_P4_CFG_DATA		= 0x234, /* Port4 config data */
89
90	/* ports 1-3 follow after this */
91	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
92	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
93	/* ports 5-7 follow after this */
94	MVS_P4_VSR_ADDR		= 0x250, /* port4 VSR addr */
95	MVS_P4_VSR_DATA		= 0x254, /* port4 VSR data */
96};
97
/* Offsets into PCI configuration space. */
98enum pci_cfg_registers {
99	PCR_PHY_CTL		= 0x40,
100	PCR_PHY_CTL2		= 0x90,
101	PCR_DEV_CTRL		= 0xE8,
102	PCR_LINK_STAT		= 0xF2,
103};
104
105/*  SAS/SATA Vendor Specific Port Registers */
106enum sas_sata_vsp_regs {
107	VSR_PHY_STAT		= 0x00, /* Phy Status */
108	VSR_PHY_MODE1		= 0x01, /* phy tx */
109	VSR_PHY_MODE2		= 0x02, /* tx scc */
110	VSR_PHY_MODE3		= 0x03, /* pll */
111	VSR_PHY_MODE4		= 0x04, /* VCO */
112	VSR_PHY_MODE5		= 0x05, /* Rx */
113	VSR_PHY_MODE6		= 0x06, /* CDR */
114	VSR_PHY_MODE7		= 0x07, /* Impedance */
115	VSR_PHY_MODE8		= 0x08, /* Voltage */
116	VSR_PHY_MODE9		= 0x09, /* Test */
117	VSR_PHY_MODE10		= 0x0A, /* Power */
118	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
119	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
120	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
121};
122
/* Link-rate bit fields within the phy status word. */
123enum chip_register_bits {
124	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY		64
132
/* 64xx physical region descriptor (one scatter/gather entry). */
133struct mvs_prd {
134	__le64			addr;		/* 64-bit buffer address */
135	__le32			reserved;
136	__le32			len;		/* 16-bit length */
137};
138
139#define SPI_CTRL_REG				0xc0
140#define SPI_CTRL_VENDOR_ENABLE		(1U<<29)
141#define SPI_CTRL_SPIRDY         		(1U<<22)
142#define SPI_CTRL_SPISTART			(1U<<20)
143
144#define SPI_CMD_REG 		0xc4
145#define SPI_DATA_REG 		0xc8
146
147#define SPI_CTRL_REG_64XX		0x10
148#define SPI_CMD_REG_64XX		0x14
149#define SPI_DATA_REG_64XX		0x18
150
151#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000000..0940fae19d20
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
/*
 * Classify phy i as SAS or SATA from the PLL status field in
 * VSR_PHY_MODE3 (bits 21:16).  0x10 means SAS; 0x1d and anything
 * unrecognised defaults to SATA.
 */
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31	u32 reg;
32	struct mvs_phy *phy = &mvi->phy[i];
33	u32 phy_status;
34
35	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36	reg = mvs_read_port_vsr_data(mvi, i);
37	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39	switch (phy_status) {
40	case 0x10:
41		phy->phy_type |= PORT_TYPE_SAS;
42		break;
43	case 0x1d:
44	default:
45		phy->phy_type |= PORT_TYPE_SATA;
46		break;
47	}
48}
49
/* Enable command transmission for one phy via its PCS enable bit. */
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52	void __iomem *regs = mvi->regs;
53	u32 tmp;
54
55	tmp = mr32(MVS_PCS);
56	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57	mw32(MVS_PCS, tmp);
58}
59
/*
 * Reset phy.  First acknowledge any pending ready-change event.  A hard
 * reset sets PHY_RST_HARD in the phy control register and busy-waits for
 * the self-clearing bit; a soft reset sets PHY_RST via VSR_PHY_STAT.
 */
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62	u32 tmp;
63
64	tmp = mvs_read_port_irq_stat(mvi, phy_id);
65	tmp &= ~PHYEV_RDY_CH;
66	mvs_write_port_irq_stat(mvi, phy_id, tmp);
67	if (hard) {
68		tmp = mvs_read_phy_ctl(mvi, phy_id);
69		tmp |= PHY_RST_HARD;
70		mvs_write_phy_ctl(mvi, phy_id, tmp);
71		do {
72			tmp = mvs_read_phy_ctl(mvi, phy_id);
73		} while (tmp & PHY_RST_HARD);
74	} else {
75		mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76		tmp = mvs_read_port_vsr_data(mvi, phy_id);
77		tmp |= PHY_RST;
78		mvs_write_port_vsr_data(mvi, phy_id, tmp);
79	}
80}
81
/* Power down phy by setting bit 23 in VSR_PHY_MODE2. */
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84	u32 tmp;
85	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86	tmp = mvs_read_port_vsr_data(mvi, phy_id);
87	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
/*
 * Power up / configure phy with vendor-magic VSR writes (values from
 * the Marvell reference code; register 0x1B4 and 0x104 semantics are
 * undocumented here).
 */
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92	mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93	mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94	mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95	mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97	mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
/*
 * One-time chip bring-up for 94xx: reset the core, configure phys,
 * program the DMA ring/list base addresses, set little-endian data
 * paths, disable interrupt coalescing, and finally enable the TX/RX
 * engines and interrupt sources.  Returns 0 (no failure paths).
 */
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102	void __iomem *regs = mvi->regs;
103	int i;
104	u32 tmp, cctl;
105
106	mvs_show_pcie_usage(mvi);
107	if (mvi->flags & MVF_FLAG_SOC) {
108		tmp = mr32(MVS_PHY_CTL);
109		tmp &= ~PCTL_PWR_OFF;
110		tmp |= PCTL_PHY_DSBL;
111		mw32(MVS_PHY_CTL, tmp);
112	}
113
114	/* Init Chip */
115	/* make sure RST is set; HBA_RST /should/ have done that for us */
116	cctl = mr32(MVS_CTL) & 0xFFFF;
117	if (cctl & CCTL_RST)
118		cctl &= ~CCTL_RST;
119	else
120		mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122	if (mvi->flags & MVF_FLAG_SOC) {
123		tmp = mr32(MVS_PHY_CTL);
124		tmp &= ~PCTL_PWR_OFF;
125		tmp |= PCTL_COM_ON;
126		tmp &= ~PCTL_PHY_DSBL;
127		tmp |= PCTL_LINK_RST;
128		mw32(MVS_PHY_CTL, tmp);
129		msleep(100);
130		tmp &= ~PCTL_LINK_RST;
131		mw32(MVS_PHY_CTL, tmp);
132		msleep(100);
133	}
134
135	/* reset control */
136	mw32(MVS_PCS, 0);		/* MVS_PCS */
137	mw32(MVS_STP_REG_SET_0, 0);
138	mw32(MVS_STP_REG_SET_1, 0);
139
140	/* init phys */
141	mvs_phy_hacks(mvi);
142
143	/* disable Multiplexing, enable phy implemented */
144	mw32(MVS_PORTS_IMP, 0xFF);
145
146
	/* vendor-magic per-port VSR defaults (see mvs_94xx_phy_enable) */
147	mw32(MVS_PA_VSR_ADDR, 0x00000104);
148	mw32(MVS_PA_VSR_PORT, 0x00018080);
149	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150	mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152	/* set LED blink when IO*/
153	mw32(MVS_PA_VSR_ADDR, 0x00000030);
154	tmp = mr32(MVS_PA_VSR_PORT);
155	tmp &= 0xFFFF00FF;
156	tmp |= 0x00003300;
157	mw32(MVS_PA_VSR_PORT, tmp);
158
	/* program DMA base addresses (hi words via double 16-bit shift to
	 * avoid UB when dma_addr_t is 32-bit) */
159	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166	mw32(MVS_TX_LO, mvi->tx_dma);
167	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170	mw32(MVS_RX_LO, mvi->rx_dma);
171	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173	for (i = 0; i < mvi->chip->n_phy; i++) {
174		mvs_94xx_phy_disable(mvi, i);
175		/* set phy local SAS address */
176		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177						(mvi->phy[i].dev_sas_addr));
178
179		mvs_94xx_enable_xmt(mvi, i);
180		mvs_94xx_phy_enable(mvi, i);
181
182		mvs_94xx_phy_reset(mvi, i, 1);
183		msleep(500);
184		mvs_94xx_detect_porttype(mvi, i);
185	}
186
187	if (mvi->flags & MVF_FLAG_SOC) {
188		/* set select registers */
189		writel(0x0E008000, regs + 0x000);
190		writel(0x59000008, regs + 0x004);
191		writel(0x20, regs + 0x008);
192		writel(0x20, regs + 0x00c);
193		writel(0x20, regs + 0x010);
194		writel(0x20, regs + 0x014);
195		writel(0x20, regs + 0x018);
196		writel(0x20, regs + 0x01c);
197	}
198	for (i = 0; i < mvi->chip->n_phy; i++) {
199		/* clear phy int status */
200		tmp = mvs_read_port_irq_stat(mvi, i);
201		tmp &= ~PHYEV_SIG_FIS;
202		mvs_write_port_irq_stat(mvi, i, tmp);
203
204		/* set phy int mask */
205		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206			PHYEV_ID_DONE  | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207		mvs_write_port_irq_mask(mvi, i, tmp);
208
209		msleep(100);
210		mvs_update_phyinfo(mvi, i, 1);
211	}
212
213	/* FIXME: update wide port bitmaps */
214
215	/* little endian for open address and command table, etc. */
216	/*
217	 * it seems that ( from the spec ) turning on big-endian won't
218	 * do us any good on big-endian machines, need further confirmation
219	 */
220	cctl = mr32(MVS_CTL);
221	cctl |= CCTL_ENDIAN_CMD;
222	cctl |= CCTL_ENDIAN_DATA;
223	cctl &= ~CCTL_ENDIAN_OPEN;
224	cctl |= CCTL_ENDIAN_RSP;
225	mw32_f(MVS_CTL, cctl);
226
227	/* reset CMD queue */
228	tmp = mr32(MVS_PCS);
229	tmp |= PCS_CMD_RST;
230	mw32(MVS_PCS, tmp);
231	/* interrupt coalescing may cause missing HW interrput in some case,
232	 * and the max count is 0x1ff, while our max slot is 0x200,
233	 * it will make count 0.
234	 */
235	tmp = 0;
236	mw32(MVS_INT_COAL, tmp);
237
238	tmp = 0x100;
239	mw32(MVS_INT_COAL_TMOUT, tmp);
240
241	/* ladies and gentlemen, start your engines */
242	mw32(MVS_TX_CFG, 0);
243	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245	/* enable CMD/CMPL_Q/RESP mode */
246	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247		PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249	/* enable completion queue interrupt */
250	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251		CINT_DMA_PCIE);
252	tmp |= CINT_PHY_MASK;
253	mw32(MVS_INT_MASK, tmp);
254
255	/* Enable SRS interrupt */
256	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258	return 0;
259}
260
/*
 * Map BAR2 and derive the register windows: extended regs live at
 * +0x10200, main regs at +0x20000 (second controller of a dual-HBA
 * board gets an extra +0x4000).  Returns 0 on success, -1 on failure.
 */
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
263	if (!mvs_ioremap(mvi, 2, -1)) {
264		mvi->regs_ex = mvi->regs + 0x10200;
265		mvi->regs += 0x20000;
266		if (mvi->id == 1)
267			mvi->regs += 0x4000;
268		return 0;
269	}
270	return -1;
271}
272
/* Undo the offsets added by mvs_94xx_ioremap() before unmapping BAR2. */
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275	if (mvi->regs) {
276		mvi->regs -= 0x20000;
277		if (mvi->id == 1)
278			mvi->regs -= 0x4000;
279		mvs_iounmap(mvi->regs);
280	}
281}
282
/*
 * Enable SAS core A/B interrupt routing in the extended register block.
 * NOTE(review): the control value is also written to MVS_GBL_INT_STAT
 * and mirrored to offsets 0x0C-0x18 — this matches the vendor reference
 * sequence, but confirm against the 94xx datasheet.
 */
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285	void __iomem *regs = mvi->regs_ex;
286	u32 tmp;
287
288	tmp = mr32(MVS_GBL_CTL);
289	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290	mw32(MVS_GBL_INT_STAT, tmp);
291	writel(tmp, regs + 0x0C);
292	writel(tmp, regs + 0x10);
293	writel(tmp, regs + 0x14);
294	writel(tmp, regs + 0x18);
295	mw32(MVS_GBL_CTL, tmp);
296}
297
/* Mirror of mvs_94xx_interrupt_enable(): clears the SAS A/B irq bits. */
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300	void __iomem *regs = mvi->regs_ex;
301	u32 tmp;
302
303	tmp = mr32(MVS_GBL_CTL);
304
305	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306	mw32(MVS_GBL_INT_STAT, tmp);
307	writel(tmp, regs + 0x0C);
308	writel(tmp, regs + 0x10);
309	writel(tmp, regs + 0x14);
310	writel(tmp, regs + 0x18);
311	mw32(MVS_GBL_CTL, tmp);
312}
313
/*
 * Read global interrupt status from the extended register window.
 * Returns 0 when neither SAS core A nor B raised the interrupt.
 * For SOC variants stat stays 0 (no status register is read).
 */
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316	void __iomem *regs = mvi->regs_ex;
317	u32 stat = 0;
318	if (!(mvi->flags & MVF_FLAG_SOC)) {
319		stat = mr32(MVS_GBL_INT_STAT);
320
321		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322			return 0;
323	}
324	return stat;
325}
326
/*
 * Main interrupt handler body for 94xx.  Each mvs_info owns one SAS
 * core: core A maps to controller id 0, core B to id 1; only the
 * matching instance processes the event.  CINT_DONE is acked first,
 * then the shared dispatch runs under mvi->lock (unless the tasklet
 * path is compiled in).
 */
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329	void __iomem *regs = mvi->regs;
330
331	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332			((stat & IRQ_SAS_B) && mvi->id == 1)) {
333		mw32_f(MVS_INT_STAT, CINT_DONE);
334	#ifndef MVS_USE_TASKLET
335		spin_lock(&mvi->lock);
336	#endif
337		mvs_int_full(mvi);
338	#ifndef MVS_USE_TASKLET
339		spin_unlock(&mvi->lock);
340	#endif
341	}
342	return IRQ_HANDLED;
343}
344
/*
 * Force-clear a command slot's active bit (bank at 0x300) and busy-wait
 * until the hardware reports it cleared.
 * NOTE(review): same (slot_idx >> 3) offset vs (slot_idx % 32) bit-index
 * pairing as the 64xx variant — verify against the register layout.
 */
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347	u32 tmp;
348	mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349	do {
350		tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351	} while (tmp & 1 << (slot_idx % 32));
352}
353
/*
 * Stop command delivery on a port (same sequence as the 64xx variant):
 * ack the SRS bit for SATA, ack CINT_CI_STOP, set the PCS stop bits.
 */
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355			u32 tfs)
356{
357	void __iomem *regs = mvi->regs;
358	u32 tmp;
359
360	if (type == PORT_TYPE_SATA) {
361		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362		mw32(MVS_INT_STAT_SRS_0, tmp);
363	}
364	mw32(MVS_INT_STAT, CINT_CI_STOP);
365	tmp = mr32(MVS_PCS) | 0xFF00;
366	mw32(MVS_PCS, tmp);
367}
368
/*
 * Release a SATA register set on 94xx.  The 64-bit allocation bitmap
 * mvi->sata_reg_set tracks all sets; sets 0-31 are acked via
 * MVS_INT_STAT_SRS_0, sets 32+ via MVS_INT_STAT_SRS_1.  No-op when
 * *tfs is already MVS_ID_NOT_MAPPED.
 */
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371	void __iomem *regs = mvi->regs;
372	u32 tmp;
373	u8 reg_set = *tfs;
374
375	if (*tfs == MVS_ID_NOT_MAPPED)
376		return;
377
378	mvi->sata_reg_set &= ~bit(reg_set);
379	if (reg_set < 32) {
380		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381		tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382		if (tmp)
383			mw32(MVS_INT_STAT_SRS_0, tmp);
384	} else {
385		w_reg_set_enable(reg_set, mvi->sata_reg_set);
386		tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
387		if (tmp)
388			mw32(MVS_INT_STAT_SRS_1, tmp);
389	}
390
391	*tfs = MVS_ID_NOT_MAPPED;
392
393	return;
394}
395
/*
 * Claim the first free SATA register set for a device.
 * mv_ffc64() returns the index of the first clear bit in the 64-bit
 * allocation bitmap mvi->sata_reg_set (negative when full).  On success
 * the set's bit is recorded, the matching hardware enable word is
 * programmed, *tfs receives the set number and 0 is returned; returns
 * MVS_ID_NOT_MAPPED when all 64 sets are in use.  Returns 0 immediately
 * if *tfs is already mapped.
 */
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398	int i;
399	void __iomem *regs = mvi->regs;
400
401	if (*tfs != MVS_ID_NOT_MAPPED)
402		return 0;
403
404	i = mv_ffc64(mvi->sata_reg_set);
	/*
	 * Sets 32..63 are enabled through the upper 32-bit word of the
	 * bitmap; sets 0..31 through the lower word.  The original test
	 * used "i > 32", which wrongly routed set 32 through the lower
	 * word — it must be "i >= 32".
	 */
405	if (i >= 32) {
406		mvi->sata_reg_set |= bit(i);
407		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408		*tfs = i;
409		return 0;
410	} else if (i >= 0) {
411		mvi->sata_reg_set |= bit(i);
412		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413		*tfs = i;
414		return 0;
415	}
416	return MVS_ID_NOT_MAPPED;
417}
418
/*
 * Fill the 94xx PRD table from a DMA-mapped scatterlist.  Unlike 64xx,
 * the length lives in the im_len sub-structure of struct mvs_prd.
 */
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421	int i;
422	struct scatterlist *sg;
423	struct mvs_prd *buf_prd = prd;
424	for_each_sg(scatter, sg, nr, i) {
425		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427		buf_prd++;
428	}
429}
430
/* Return 1 when phy i reports ready (OOB complete), else 0. */
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433	u32 phy_st;
434	phy_st = mvs_read_phy_ctl(mvi, i);
435	if (phy_st & PHY_READY_MASK)	/* phy ready */
436		return 1;
437	return 0;
438}
439
/*
 * Read the local (device) IDENTIFY address frame for a port: 7 dwords
 * from the indirect config registers, copied as 28 bytes into *id.
 */
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441					struct sas_identify_frame *id)
442{
443	int i;
444	u32 id_frame[7];
445
446	for (i = 0; i < 7; i++) {
447		mvs_write_port_cfg_addr(mvi, port_id,
448					CONFIG_ID_FRAME0 + i * 4);
449		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450	}
451	memcpy(id, id_frame, 28);
452}
453
/*
 * Read the attached device's IDENTIFY address frame (7 dwords, 28 bytes)
 * from the indirect config registers, logging each dword for debugging.
 */
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455					struct sas_identify_frame *id)
456{
457	int i;
458	u32 id_frame[7];
459
460	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
461	for (i = 0; i < 7; i++) {
462		mvs_write_port_cfg_addr(mvi, port_id,
463					CONFIG_ATT_ID_FRAME0 + i * 4);
464		id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465		mv_dprintk("94xx phy %d atta frame %d %x.\n",
466			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467	}
468	/* mvs_hexdump(28, (u8 *)id_frame, 0); */
469	memcpy(id, id_frame, 28);
470}
471
/*
 * Pack a device-info dword from an IDENTIFY frame: device type in the
 * low bits, initiator/target protocol flags, phy id in bits 31-24.
 */
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474	u32 att_dev_info = 0;
475
476	att_dev_info |= id->dev_type;
477	if (id->stp_iport)
478		att_dev_info |= PORT_DEV_STP_INIT;
479	if (id->smp_iport)
480		att_dev_info |= PORT_DEV_SMP_INIT;
481	if (id->ssp_iport)
482		att_dev_info |= PORT_DEV_SSP_INIT;
483	if (id->stp_tport)
484		att_dev_info |= PORT_DEV_STP_TRGT;
485	if (id->smp_tport)
486		att_dev_info |= PORT_DEV_SMP_TRGT;
487	if (id->ssp_tport)
488		att_dev_info |= PORT_DEV_SSP_TRGT;
489
490	att_dev_info |= (u32)id->phy_id<<24;
491	return att_dev_info;
492}
493
/* Attached-device info uses the same encoding as device info. */
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496	return mvs_94xx_make_dev_info(id);
497}
498
/*
 * Populate phy link-rate and attached-device fields on 94xx.  The
 * hardware's negotiated-rate code is offset by 0x8 to line up with the
 * SAS_LINK_RATE_* enum values (NOTE(review): confirm this mapping
 * against the 94xx register spec).  For SAS attachments the attached
 * IDENTIFY frame supplies dev-info and SAS address; SATA attachments
 * get a synthetic STP-target dev-info.
 */
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500			struct sas_identify_frame *id)
501{
502	struct mvs_phy *phy = &mvi->phy[i];
503	struct asd_sas_phy *sas_phy = &phy->sas_phy;
504	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505	sas_phy->linkrate =
506		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
508	sas_phy->linkrate += 0x8;
509	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512	mvs_94xx_get_dev_identify_frame(mvi, i, id);
513	phy->dev_info = mvs_94xx_make_dev_info(id);
514
515	if (phy->phy_type & PORT_TYPE_SAS) {
516		mvs_94xx_get_att_identify_frame(mvi, i, id);
517		phy->att_dev_info = mvs_94xx_make_att_info(id);
518		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519	} else {
520		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521	}
522
523}
524
/* Link-rate limiting is not implemented for 94xx yet (stub). */
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526			struct sas_phy_linkrates *rates)
527{
528	/* TODO */
529}
530
/*
 * Clear all active command state by pulsing both STP register-set
 * enable registers: write 0, then restore the saved value.
 */
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533	u32 tmp;
534	void __iomem *regs = mvi->regs;
535	tmp = mr32(MVS_STP_REG_SET_0);
536	mw32(MVS_STP_REG_SET_0, 0);
537	mw32(MVS_STP_REG_SET_0, tmp);
538	tmp = mr32(MVS_STP_REG_SET_1);
539	mw32(MVS_STP_REG_SET_1, 0);
540	mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
/*
 * Read the SPI data register.  regs_ex was offset by +0x10200 in
 * ioremap; subtracting it here recovers the BAR base the SPI registers
 * are addressed from.
 */
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546	void __iomem *regs = mvi->regs_ex - 0x10200;
547	return mr32(SPI_RD_DATA_REG_94XX);
548}
549
/*
 * Write the SPI data register (same base-recovery as spi_read_data).
 * NOTE(review): writes through SPI_RD_DATA_REG_94XX — confirm the 94xx
 * uses a single shared read/write data register.
 */
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552	void __iomem *regs = mvi->regs_ex - 0x10200;
553	 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
/*
 * Build a 94xx SPI command word: opcode in bits 15-8, length in bits
 * 7-4, plus read/address-valid flags.  Unlike 64xx, the flash address
 * is written to its own register (SPI_ADDR_REG_94XX) as a side effect.
 * addr == MV_MAX_U32 means "no address phase".  Always returns 0.
 */
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558				u32      *dwCmd,
559				u8       cmd,
560				u8       read,
561				u8       length,
562				u32      addr
563				)
564{
565	void __iomem *regs = mvi->regs_ex - 0x10200;
566	u32  dwTmp;
567
568	dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569	if (read)
570		dwTmp |= SPI_CTRL_READ_94XX;
571
572	if (addr != MV_MAX_U32) {
573		mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574		dwTmp |= SPI_ADDR_VLD_94XX;
575	}
576
577	*dwCmd = dwTmp;
578	return 0;
579}
580
581
/* Kick off a built SPI command by setting the SpiStart bit. Returns 0. */
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584	void __iomem *regs = mvi->regs_ex - 0x10200;
585	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587	return 0;
588}
589
/*
 * Poll for SPI completion: SpiStart self-clears when the transfer is
 * done.  Sleeps 10 ms between polls, up to `timeout` iterations.
 * Returns 0 on completion, -1 on timeout.
 */
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592	void __iomem *regs = mvi->regs_ex - 0x10200;
593	u32   i, dwTmp;
594
595	for (i = 0; i < timeout; i++) {
596		dwTmp = mr32(SPI_CTRL_REG_94XX);
597		if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598			return 0;
599		msleep(10);
600	}
601
602	return -1;
603}
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
/*
 * Hotplug DMA workaround (94xx PRD layout): pad every unused PRD entry
 * from index `from` to MAX_SG_ENTRY with the given dummy buffer.
 */
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608	int i;
609	struct mvs_prd *buf_prd = prd;
610	buf_prd	+= from;
611	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612		buf_prd->addr = cpu_to_le64(buf_dma);
613		buf_prd->im_len.len = cpu_to_le32(buf_len);
614		++buf_prd;
615	}
616}
617#endif
618
/*
 * 94xx hardware dispatch table.  Positional initializer — entry order
 * must match struct mvs_dispatch.  NULL entries mark hooks the 94xx
 * does not implement (non-spec SAS ops, phy work-around, STP reset).
 */
619const struct mvs_dispatch mvs_94xx_dispatch = {
620	"mv94xx",
621	mvs_94xx_init,
622	NULL,
623	mvs_94xx_ioremap,
624	mvs_94xx_iounmap,
625	mvs_94xx_isr,
626	mvs_94xx_isr_status,
627	mvs_94xx_interrupt_enable,
628	mvs_94xx_interrupt_disable,
629	mvs_read_phy_ctl,
630	mvs_write_phy_ctl,
631	mvs_read_port_cfg_data,
632	mvs_write_port_cfg_data,
633	mvs_write_port_cfg_addr,
634	mvs_read_port_vsr_data,
635	mvs_write_port_vsr_data,
636	mvs_write_port_vsr_addr,
637	mvs_read_port_irq_stat,
638	mvs_write_port_irq_stat,
639	mvs_read_port_irq_mask,
640	mvs_write_port_irq_mask,
641	mvs_get_sas_addr,
642	mvs_94xx_command_active,
643	mvs_94xx_issue_stop,
644	mvs_start_delivery,
645	mvs_rx_update,
646	mvs_int_full,
647	mvs_94xx_assign_reg_set,
648	mvs_94xx_free_reg_set,
649	mvs_get_prd_size,
650	mvs_get_prd_count,
651	mvs_94xx_make_prd,
652	mvs_94xx_detect_porttype,
653	mvs_94xx_oob_done,
654	mvs_94xx_fix_phy_info,
655	NULL,
656	mvs_94xx_phy_set_link_rate,
657	mvs_hw_max_link_rate,
658	mvs_94xx_phy_disable,
659	mvs_94xx_phy_enable,
660	mvs_94xx_phy_reset,
661	NULL,
662	mvs_94xx_clear_active_cmds,
663	mvs_94xx_spi_read_data,
664	mvs_94xx_spi_write_data,
665	mvs_94xx_spi_buildcmd,
666	mvs_94xx_spi_issuecmd,
667	mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669	mvs_94xx_fix_dma,
670#endif
671};
672
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000000..23ed9b164669
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
2 * Marvell 88SE94xx hardware specific head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
/*
 * 88SE94xx memory-mapped register offsets (relative to the mapped BAR).
 * Per-port/per-phy registers list the port0/phy0 offset; ports 1-3 and
 * 5-7 follow at the strides noted in the comments below.
 */
enum hw_registers {
	MVS_GBL_CTL		= 0x04,  /* global control */
	MVS_GBL_INT_STAT	= 0x00,  /* global irq status */
	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */

	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */

	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */

	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
	MVS_CMD_LIST_HI		= 0x10C,
	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
	MVS_RX_FIS_HI		= 0x114,
	MVS_STP_REG_SET_0	= 0x118, /* STP/SATA Register Set Enable */
	MVS_STP_REG_SET_1	= 0x11C,
	MVS_TX_CFG		= 0x120, /* TX configuration */
	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
	MVS_TX_HI		= 0x128,

	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
	MVS_RX_CFG		= 0x134, /* RX configuration */
	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
	MVS_RX_HI		= 0x13C,
	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */

	MVS_INT_COAL		= 0x148, /* Int coalescing config */
	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
	MVS_INT_STAT		= 0x150, /* Central int status */
	MVS_INT_MASK		= 0x154, /* Central int enable */
	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
	MVS_INT_MASK_SRS_0	= 0x15C,
	MVS_INT_STAT_SRS_1	= 0x160,
	MVS_INT_MASK_SRS_1	= 0x164,
	MVS_NON_NCQ_ERR_0	= 0x168, /* SRS Non-specific NCQ Error */
	MVS_NON_NCQ_ERR_1	= 0x16C,
	MVS_CMD_ADDR		= 0x170, /* Command register port (addr) */
	MVS_CMD_DATA		= 0x174, /* Command register port (data) */
	MVS_MEM_PARITY_ERR	= 0x178, /* Memory parity error */

	/* ports 1-3 follow after this */
	MVS_P0_INT_STAT		= 0x180, /* port0 interrupt status */
	MVS_P0_INT_MASK		= 0x184, /* port0 interrupt mask */
	/* ports 5-7 follow after this */
	MVS_P4_INT_STAT		= 0x1A0, /* Port4 interrupt status */
	MVS_P4_INT_MASK		= 0x1A4, /* Port4 interrupt enable mask */

	/* ports 1-3 follow after this */
	MVS_P0_SER_CTLSTAT	= 0x1D0, /* port0 serial control/status */
	/* ports 5-7 follow after this */
	MVS_P4_SER_CTLSTAT	= 0x1E0, /* port4 serial control/status */

	/* ports 1-3 follow after this */
	MVS_P0_CFG_ADDR		= 0x200, /* port0 phy register address */
	MVS_P0_CFG_DATA		= 0x204, /* port0 phy register data */
	/* ports 5-7 follow after this */
	MVS_P4_CFG_ADDR		= 0x220, /* Port4 config address */
	MVS_P4_CFG_DATA		= 0x224, /* Port4 config data */

	/* phys 1-3 follow after this */
	MVS_P0_VSR_ADDR		= 0x250, /* phy0 VSR address */
	MVS_P0_VSR_DATA		= 0x254, /* phy0 VSR data */
	/* phys 1-3 follow after this */
	/* multiplexing: note P4 VSR shares the P0 offsets by design */
	MVS_P4_VSR_ADDR		= 0x250, /* phy4 VSR address */
	MVS_P4_VSR_DATA		= 0x254, /* phy4 VSR data */
	MVS_PA_VSR_ADDR		= 0x290, /* All port VSR addr */
	MVS_PA_VSR_PORT		= 0x294, /* All port VSR data */
};
104
/* Offsets into PCI configuration space (accessed via pci_read/write_config). */
enum pci_cfg_registers {
	PCR_PHY_CTL	= 0x40,		/* phy control */
	PCR_PHY_CTL2	= 0x90,		/* phy control (second bank) */
	PCR_DEV_CTRL	= 0x78,		/* device control */
	PCR_LINK_STAT	= 0x82,		/* PCI-E link status (16-bit) */
};
111
/* SAS/SATA Vendor Specific Port Registers.
 * Offsets are expressed as register index * 4 (32-bit register file). */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT	= 0x00 * 4, /* Phy Status */
	VSR_PHY_MODE1	= 0x01 * 4, /* phy tx */
	VSR_PHY_MODE2	= 0x02 * 4, /* tx scc */
	VSR_PHY_MODE3	= 0x03 * 4, /* pll */
	VSR_PHY_MODE4	= 0x04 * 4, /* VCO */
	VSR_PHY_MODE5	= 0x05 * 4, /* Rx */
	VSR_PHY_MODE6	= 0x06 * 4, /* CDR */
	VSR_PHY_MODE7	= 0x07 * 4, /* Impedance */
	VSR_PHY_MODE8	= 0x08 * 4, /* Voltage */
	VSR_PHY_MODE9	= 0x09 * 4, /* Test */
	VSR_PHY_MODE10	= 0x0A * 4, /* Power */
	VSR_PHY_MODE11	= 0x0B * 4, /* Phy Mode */
	VSR_PHY_VS0	= 0x0C * 4, /* Vendor Specific 0 */
	VSR_PHY_VS1	= 0x0D * 4, /* Vendor Specific 1 */
};
129
/* Link-rate fields within the per-phy serial control/status register. */
enum chip_register_bits {
	/* NOTE(review): MIN and MAX masks are identical (0x7 << 8) here;
	 * looks intentional for this chip but verify against the 94xx
	 * datasheet — the 64xx family uses distinct fields. */
	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
			(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
};
137
/*
 * MAIN_IRQ_CAUSE (R10200) bit definitions.
 *
 * All shifts use 1U: (1 << 31) left-shifts into the sign bit of a
 * signed int, which is undefined behaviour in C.  Unsigned shifts are
 * well defined and match the style of every other bit enum in this
 * driver (cf. enum hw_register_bits).  Values are unchanged.
 */
enum pci_interrupt_cause {
	/* MAIN_IRQ_CAUSE (R10200) Bits*/
	IRQ_COM_IN_I2O_IOP0	= (1U << 0),
	IRQ_COM_IN_I2O_IOP1	= (1U << 1),
	IRQ_COM_IN_I2O_IOP2	= (1U << 2),
	IRQ_COM_IN_I2O_IOP3	= (1U << 3),
	IRQ_COM_OUT_I2O_HOS0	= (1U << 4),
	IRQ_COM_OUT_I2O_HOS1	= (1U << 5),
	IRQ_COM_OUT_I2O_HOS2	= (1U << 6),
	IRQ_COM_OUT_I2O_HOS3	= (1U << 7),
	IRQ_PCIF_TO_CPU_DRBL0	= (1U << 8),
	IRQ_PCIF_TO_CPU_DRBL1	= (1U << 9),
	IRQ_PCIF_TO_CPU_DRBL2	= (1U << 10),
	IRQ_PCIF_TO_CPU_DRBL3	= (1U << 11),
	IRQ_PCIF_DRBL0		= (1U << 12),
	IRQ_PCIF_DRBL1		= (1U << 13),
	IRQ_PCIF_DRBL2		= (1U << 14),
	IRQ_PCIF_DRBL3		= (1U << 15),
	IRQ_XOR_A		= (1U << 16),
	IRQ_XOR_B		= (1U << 17),
	IRQ_SAS_A		= (1U << 18),
	IRQ_SAS_B		= (1U << 19),
	IRQ_CPU_CNTRL		= (1U << 20),
	IRQ_GPIO		= (1U << 21),
	IRQ_UART		= (1U << 22),
	IRQ_SPI			= (1U << 23),
	IRQ_I2C			= (1U << 24),
	IRQ_SGPIO		= (1U << 25),
	/* bits 26-28 reserved */
	IRQ_COM_ERR		= (1U << 29),
	IRQ_I2O_ERR		= (1U << 30),
	IRQ_PCIE_ERR		= (1U << 31),
};
170
/* Maximum scatter/gather entries per command slot on the 94xx. */
#define MAX_SG_ENTRY		255

/* Interrupt/mode + length word of a PRD entry.
 * NOTE(review): bitfield layout and the mixed __le32/u8 member types are
 * compiler/endian sensitive; the hardware expects a little-endian 32-bit
 * word — confirm layout on big-endian builds. */
struct mvs_prd_imt {
	__le32			len:22;		/* buffer length, 22 bits */
	u8			_r_a:2;		/* reserved */
	u8			misc_ctl:4;	/* misc control */
	u8			inter_sel:4;	/* interrupt select */
};

/* Physical Region Descriptor: one scatter/gather element as consumed
 * by the 94xx DMA engine. */
struct mvs_prd {
	/* 64-bit buffer address */
	__le64			addr;
	/* 22-bit length */
	struct mvs_prd_imt	im_len;
} __attribute__ ((packed));
186
/* 94xx SPI flash controller registers (offsets into the register BAR)
 * and control bits, used by the mvs_94xx_spi_* routines. */
#define SPI_CTRL_REG_94XX		0xc800	/* SPI control */
#define SPI_ADDR_REG_94XX		0xc804	/* SPI flash address */
#define SPI_WR_DATA_REG_94XX		0xc808	/* write data */
#define SPI_RD_DATA_REG_94XX		0xc80c	/* read data */
#define SPI_CTRL_READ_94XX		(1U << 2) /* do a read cycle */
#define SPI_ADDR_VLD_94XX		(1U << 1) /* address valid */
#define SPI_CTRL_SpiStart_94XX		(1U << 0) /* start; HW clears when done */

/* First cleared bit of x; wraps the kernel's ffz() (undefined for ~0). */
#define mv_ffc(x)   ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
/* Read the STP/SATA register-set-enable word covering register set i:
 * sets 0-31 live in MVS_STP_REG_SET_0, 32+ in MVS_STP_REG_SET_1.
 * "regs" must be in scope at the use site (see mv_chips.h mr32/mw32). */
#define r_reg_set_enable(i) \
	(((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
	mr32(MVS_STP_REG_SET_0))

/* Write tmp to whichever register-set-enable word covers set i. */
#define w_reg_set_enable(i, tmp) \
	(((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
	mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000000..a67e1c4172f9
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#ifndef _MV_CHIPS_H_
27#define _MV_CHIPS_H_
28
/*
 * Register access helpers.  Every use site must have a local "regs"
 * (void __iomem * for the MMIO variants) in scope.
 */
#define mr32(reg)	readl(regs + (reg))
#define mw32(reg, val)	writel((val), regs + (reg))
/* write then read back: flushes posted PCI writes before continuing */
#define mw32_f(reg, val)	do {			\
				mw32(reg, val);	\
				mr32(reg);	\
			} while (0)

/*
 * Port-IO variants.  Bug fix: iow16()/iow8() previously expanded to
 * outw((unsigned long)(val, regs + reg)) - a comma expression that
 * discarded the value and passed the port address as the data argument,
 * so 16/8-bit port writes wrote garbage.  outw()/outb() take
 * (value, port); the casts below now apply to the port only.
 */
#define iow32(reg, val)	outl(val, (unsigned long)(regs + (reg)))
#define ior32(reg)	inl((unsigned long)(regs + (reg)))
#define iow16(reg, val)	outw(val, (unsigned long)(regs + (reg)))
#define ior16(reg)	inw((unsigned long)(regs + (reg)))
#define iow8(reg, val)	outb(val, (unsigned long)(regs + (reg)))
#define ior8(reg)	inb((unsigned long)(regs + (reg)))
42
/* Read an indirect "command register port" register: write the target
 * address to MVS_CMD_ADDR, then read the value back through
 * MVS_CMD_DATA.  The write must precede the read; do not reorder. */
static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_CMD_ADDR, addr);
	return mr32(MVS_CMD_DATA);
}

/* Write an indirect "command register port" register: address first,
 * then data, through the MVS_CMD_ADDR/MVS_CMD_DATA window. */
static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_CMD_ADDR, addr);
	mw32(MVS_CMD_DATA, val);
}
56
/* Read the per-phy serial control/status register.  Ports 0-3 use the
 * P0 bank at 4-byte stride, ports 4-7 the P4 bank. */
static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
{
	void __iomem *regs = mvi->regs;
	return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
		mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
}

/* Write the per-phy serial control/status register (bank split as in
 * mvs_read_phy_ctl). */
static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
{
	void __iomem *regs = mvi->regs;
	if (port < 4)
		mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
	else
		mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
}
72
/* Generic per-port register read.  @off is the port0 bank base, @off2
 * the port4 bank base; registers within a bank are at 8-byte stride. */
static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
				u32 off2, u32 port)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	return (port < 4) ? readl(regs + port * 8) :
		readl(regs2 + (port - 4) * 8);
}

/* Generic per-port register write; banking identical to mvs_read_port. */
static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
				u32 port, u32 val)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	if (port < 4)
		writel(val, regs + port * 8);
	else
		writel(val, regs2 + (port - 4) * 8);
}
92
/* --- Thin per-port accessor wrappers over mvs_read_port()/
 * mvs_write_port().  Each pairs the port0/port4 bank bases for one
 * register class: phy config, VSR, and per-port interrupt regs. --- */

/* Read phy config data (address must be latched first, see below). */
static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA,
			MVS_P4_CFG_DATA, port);
}

/* Write phy config data for @port. */
static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA,
			MVS_P4_CFG_DATA, port, val);
}

/* Latch the phy config register address for a following data access.
 * The mdelay(10) gives the hardware time to settle — presumably a
 * chip-errata workaround; confirm before shortening. */
static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
						u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR,
			MVS_P4_CFG_ADDR, port, addr);
	mdelay(10);
}

/* Read vendor-specific (VSR) data for @port. */
static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA,
			MVS_P4_VSR_DATA, port);
}

/* Write vendor-specific (VSR) data for @port. */
static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA,
			MVS_P4_VSR_DATA, port, val);
}

/* Latch the VSR address; same settle delay as the cfg-addr path. */
static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
						u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR,
			MVS_P4_VSR_ADDR, port, addr);
	mdelay(10);
}

/* Read the per-port interrupt status register. */
static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT,
			MVS_P4_INT_STAT, port);
}

/* Write (acknowledge) per-port interrupt status bits. */
static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT,
			MVS_P4_INT_STAT, port, val);
}

/* Read the per-port interrupt mask register. */
static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK,
			MVS_P4_INT_MASK, port);

}

/* Write the per-port interrupt mask register. */
static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK,
			MVS_P4_INT_MASK, port, val);
}
161
/* One-time phy tuning applied at init via the indirect command-register
 * window.  Each write is a documented hardware workaround; the magic
 * values come from the vendor and must not be "cleaned up". */
static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(mvi, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(mvi, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
}
199
/* Acknowledge pending SATA register-set interrupts (write-1-to-clear)
 * and let the chip-specific hook clear the active command state. */
static inline void mvs_int_sata(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(MVS_INT_STAT_SRS_0);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);
	MVS_CHIP_DISP->clear_active_cmds(mvi);
}

/* Full interrupt service path: drain the RX completion ring, dispatch
 * per-port events for each phy, handle SATA SRS events, then ack the
 * central status word last so nothing is lost. */
static inline void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(MVS_INT_STAT);
	mvs_int_rx(mvi, false);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* per-port bits are the port0 bits shifted left by port # */
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	mw32(MVS_INT_STAT, stat);
}
230
/* Kick the delivery engine: publish the new TX producer index so the
 * hardware picks up the queued command slot(s). */
static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_TX_PROD_IDX, tx);
}

/* Fetch the hardware's current RX (completion ring) consumer index. */
static inline u32 mvs_rx_update(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	return mr32(MVS_RX_CONS_IDX);
}

/* Size in bytes of one PRD (scatter/gather) entry for this chip. */
static inline u32 mvs_get_prd_size(void)
{
	return sizeof(struct mvs_prd);
}

/* Number of PRD entries available per command slot. */
static inline u32 mvs_get_prd_count(void)
{
	return MAX_SG_ENTRY;
}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257 "UnKnown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
/* Fastest link rate this hardware supports (per-family constant;
 * SAS_LINK_RATE_6_0_GBPS for 94xx, see MAX_LINK_RATE in mv_94xx.h). */
static inline u32 mvs_hw_max_link_rate(void)
{
	return MAX_LINK_RATE;
}
280
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 000000000000..f8cb9defb961
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
1/*
2 * Marvell 88SE64xx/88SE94xx const head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_
27
28
/* Supported controller variants; indexes into the mvs_chips[] table
 * in mv_init.c. */
enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6485,
	chip_9480,
	chip_9180,
};
36
/* driver compile-time configuration */
enum driver_configuration {
	MVS_SLOTS	= 512,	/* command slots */
	MVS_TX_RING_SZ	= 1024,	/* TX ring size (12-bit) */
	MVS_RX_RING_SZ	= 1024, /* RX ring size (12-bit) */
				/* software requires power-of-2
				   ring size */
	MVS_SOC_SLOTS	= 64,
	MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
	MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,

	MVS_SLOT_BUF_SZ	= 8192, /* cmd tbl + IU + status + PRD */
	MVS_SSP_CMD_SZ	= 64,	/* SSP command table buffer size */
	MVS_ATA_CMD_SZ	= 96,	/* SATA command table buffer size */
	MVS_OAF_SZ	= 64,	/* Open address frame buffer size */
	MVS_QUEUE_SIZE	= 32,	/* Support Queue depth */
	/* two slots are reserved for internal/TMF use */
	MVS_CAN_QUEUE	= MVS_SLOTS - 2, /* SCSI Queue depth */
	MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
};
56
/* unchangeable hardware details */
enum hardware_details {
	MVS_MAX_PHYS	= 8,	/* max. possible phys */
	MVS_MAX_PORTS	= 8,	/* max. possible ports */
	MVS_SOC_PHYS	= 4,	/* soc phys */
	MVS_SOC_PORTS	= 4,	/* soc ports */
	MVS_MAX_DEVICES	= 1024,	/* max supported device */
};
65
/* peripheral registers (BAR2) */
enum peripheral_registers {
	SPI_CTL	= 0x10,		/* EEPROM control */
	SPI_CMD	= 0x14,		/* EEPROM command */
	SPI_DATA = 0x18,	/* EEPROM data */
};

/* Bit fields of the peripheral (EEPROM/SPI) registers above. */
enum peripheral_register_bits {
	TWSI_RDY = (1U << 7),	/* EEPROM interface ready */
	TWSI_RD	= (1U << 4),	/* EEPROM read access */

	SPI_ADDR_MASK = 0x3ffff,	/* bits 17:0 */
};
79
/* Bit definitions for the hw_registers above, grouped by the register
 * (or register class) they belong to — see the section comments. */
enum hw_register_bits {
	/* MVS_GBL_CTL */
	INT_EN		= (1U << 1),	/* Global int enable */
	HBA_RST		= (1U << 0),	/* HBA reset */

	/* MVS_GBL_INT_STAT */
	INT_XOR		= (1U << 4),	/* XOR engine event */
	INT_SAS_SATA	= (1U << 0),	/* SAS/SATA event */

	/* MVS_GBL_PORT_TYPE */	/* shl for ports 1-3 */
	SATA_TARGET	= (1U << 16),	/* port0 SATA target enable */
	MODE_AUTO_DET_PORT7 = (1U << 15),	/* port0 SAS/SATA autodetect */
	MODE_AUTO_DET_PORT6 = (1U << 14),
	MODE_AUTO_DET_PORT5 = (1U << 13),
	MODE_AUTO_DET_PORT4 = (1U << 12),
	MODE_AUTO_DET_PORT3 = (1U << 11),
	MODE_AUTO_DET_PORT2 = (1U << 10),
	MODE_AUTO_DET_PORT1 = (1U << 9),
	MODE_AUTO_DET_PORT0 = (1U << 8),
	MODE_AUTO_DET_EN    =	MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
				MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
				MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
				MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
	MODE_SAS_PORT7_MASK = (1U << 7),  /* port0 SAS(1), SATA(0) mode */
	MODE_SAS_PORT6_MASK = (1U << 6),
	MODE_SAS_PORT5_MASK = (1U << 5),
	MODE_SAS_PORT4_MASK = (1U << 4),
	MODE_SAS_PORT3_MASK = (1U << 3),
	MODE_SAS_PORT2_MASK = (1U << 2),
	MODE_SAS_PORT1_MASK = (1U << 1),
	MODE_SAS_PORT0_MASK = (1U << 0),
	MODE_SAS_SATA	=	MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
				MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
				MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
				MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,

				/* SAS_MODE value may be
				 * dictated (in hw) by values
				 * of SATA_TARGET & AUTO_DET
				 */

	/* MVS_TX_CFG */
	TX_EN		= (1U << 16),	/* Enable TX */
	TX_RING_SZ_MASK	= 0xfff,	/* TX ring size, bits 11:0 */

	/* MVS_RX_CFG */
	RX_EN		= (1U << 16),	/* Enable RX */
	RX_RING_SZ_MASK	= 0xfff,	/* RX ring size, bits 11:0 */

	/* MVS_INT_COAL */
	COAL_EN		= (1U << 16),	/* Enable int coalescing */

	/* MVS_INT_STAT, MVS_INT_MASK */
	CINT_I2C	= (1U << 31),	/* I2C event */
	CINT_SW0	= (1U << 30),	/* software event 0 */
	CINT_SW1	= (1U << 29),	/* software event 1 */
	CINT_PRD_BC	= (1U << 28),	/* PRD BC err for read cmd */
	CINT_DMA_PCIE	= (1U << 27),	/* DMA to PCIE timeout */
	CINT_MEM	= (1U << 26),	/* int mem parity err */
	CINT_I2C_SLAVE	= (1U << 25),	/* slave I2C event */
	CINT_SRS	= (1U << 3),	/* SRS event */
	CINT_CI_STOP	= (1U << 1),	/* cmd issue stopped */
	CINT_DONE	= (1U << 0),	/* cmd completion */

	/* shl for ports 1-3 */
	CINT_PORT_STOPPED = (1U << 16),	/* port0 stopped */
	CINT_PORT	= (1U << 8),	/* port0 event */
	CINT_PORT_MASK_OFFSET = 8,
	CINT_PORT_MASK	= (0xFF << CINT_PORT_MASK_OFFSET),
	CINT_PHY_MASK_OFFSET = 4,
	CINT_PHY_MASK	= (0x0F << CINT_PHY_MASK_OFFSET),

	/* TX (delivery) ring bits */
	TXQ_CMD_SHIFT	= 29,
	TXQ_CMD_SSP	= 1,		/* SSP protocol */
	TXQ_CMD_SMP	= 2,		/* SMP protocol */
	TXQ_CMD_STP	= 3,		/* STP/SATA protocol */
	TXQ_CMD_SSP_FREE_LIST	= 4,	/* add to SSP targ free list */
	TXQ_CMD_SLOT_RESET	= 7,	/* reset command slot */
	TXQ_MODE_I	= (1U << 28),	/* mode: 0=target,1=initiator */
	TXQ_MODE_TARGET = 0,
	TXQ_MODE_INITIATOR = 1,
	TXQ_PRIO_HI	= (1U << 27),	/* priority: 0=normal, 1=high */
	TXQ_PRI_NORMAL	= 0,
	TXQ_PRI_HIGH	= 1,
	TXQ_SRS_SHIFT	= 20,		/* SATA register set */
	TXQ_SRS_MASK	= 0x7f,
	TXQ_PHY_SHIFT	= 12,		/* PHY bitmap */
	TXQ_PHY_MASK	= 0xff,
	TXQ_SLOT_MASK	= 0xfff,	/* slot number */

	/* RX (completion) ring bits */
	RXQ_GOOD	= (1U << 23),	/* Response good */
	RXQ_SLOT_RESET	= (1U << 21),	/* Slot reset complete */
	RXQ_CMD_RX	= (1U << 20),	/* target cmd received */
	RXQ_ATTN	= (1U << 19),	/* attention */
	RXQ_RSP		= (1U << 18),	/* response frame xfer'd */
	RXQ_ERR		= (1U << 17),	/* err info rec xfer'd */
	RXQ_DONE	= (1U << 16),	/* cmd complete */
	RXQ_SLOT_MASK	= 0xfff,	/* slot number */

	/* mvs_cmd_hdr bits */
	MCH_PRD_LEN_SHIFT	= 16,	/* 16-bit PRD table len */
	MCH_SSP_FR_TYPE_SHIFT	= 13,	/* SSP frame type */

	/* SSP initiator only */
	MCH_SSP_FR_CMD	= 0x0,		/* COMMAND frame */

	/* SSP initiator or target */
	MCH_SSP_FR_TASK	= 0x1,		/* TASK frame */

	/* SSP target only */
	MCH_SSP_FR_XFER_RDY	= 0x4,	/* XFER_RDY frame */
	MCH_SSP_FR_RESP		= 0x5,	/* RESPONSE frame */
	MCH_SSP_FR_READ		= 0x6,	/* Read DATA frame(s) */
	MCH_SSP_FR_READ_RESP	= 0x7,	/* ditto, plus RESPONSE */

	MCH_SSP_MODE_PASSTHRU	= 1,
	MCH_SSP_MODE_NORMAL	= 0,
	MCH_PASSTHRU	= (1U << 12),	/* pass-through (SSP) */
	MCH_FBURST	= (1U << 11),	/* first burst (SSP) */
	MCH_CHK_LEN	= (1U << 10),	/* chk xfer len (SSP) */
	MCH_RETRY	= (1U << 9),	/* tport layer retry (SSP) */
	MCH_PROTECTION	= (1U << 8),	/* protection info rec (SSP) */
	MCH_RESET	= (1U << 7),	/* Reset (STP/SATA) */
	MCH_FPDMA	= (1U << 6),	/* First party DMA (STP/SATA) */
	MCH_ATAPI	= (1U << 5),	/* ATAPI (STP/SATA) */
	MCH_BIST	= (1U << 4),	/* BIST activate (STP/SATA) */
	MCH_PMP_MASK	= 0xf,		/* PMP from cmd FIS (STP/SATA)*/

	CCTL_RST	= (1U << 5),	/* port logic reset */

				/* 0(LSB first), 1(MSB first) */
	CCTL_ENDIAN_DATA = (1U << 3),	/* PRD data */
	CCTL_ENDIAN_RSP	= (1U << 2),	/* response frame */
	CCTL_ENDIAN_OPEN = (1U << 1),	/* open address frame */
	CCTL_ENDIAN_CMD	= (1U << 0),	/* command table */

	/* MVS_Px_SER_CTLSTAT (per-phy control) */
	PHY_SSP_RST	= (1U << 3),	/* reset SSP link layer */
	PHY_BCAST_CHG	= (1U << 2),	/* broadcast(change) notif */
	PHY_RST_HARD	= (1U << 1),	/* hard reset + phy reset */
	PHY_RST		= (1U << 0),	/* phy reset */
	PHY_READY_MASK	= (1U << 20),

	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
	PHYEV_DEC_ERR	= (1U << 24),	/* Phy Decoding Error */
	PHYEV_DCDR_ERR	= (1U << 23),	/* STP Decoder Error */
	PHYEV_CRC_ERR	= (1U << 22),	/* STP CRC Error */
	PHYEV_UNASSOC_FIS = (1U << 19),	/* unassociated FIS rx'd */
	PHYEV_AN	= (1U << 18),	/* SATA async notification */
	PHYEV_BIST_ACT	= (1U << 17),	/* BIST activate FIS */
	PHYEV_SIG_FIS	= (1U << 16),	/* signature FIS */
	PHYEV_POOF	= (1U << 12),	/* phy ready from 1 -> 0 */
	PHYEV_IU_BIG	= (1U << 11),	/* IU too long err */
	PHYEV_IU_SMALL	= (1U << 10),	/* IU too short err */
	PHYEV_UNK_TAG	= (1U << 9),	/* unknown tag */
	PHYEV_BROAD_CH	= (1U << 8),	/* broadcast(CHANGE) */
	PHYEV_COMWAKE	= (1U << 7),	/* COMWAKE rx'd */
	PHYEV_PORT_SEL	= (1U << 6),	/* port selector present */
	PHYEV_HARD_RST	= (1U << 5),	/* hard reset rx'd */
	PHYEV_ID_TMOUT	= (1U << 4),	/* identify timeout */
	PHYEV_ID_FAIL	= (1U << 3),	/* identify failed */
	PHYEV_ID_DONE	= (1U << 2),	/* identify done */
	PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
	PHYEV_RDY_CH	= (1U << 0),	/* phy ready changed state */

	/* MVS_PCS */
	PCS_EN_SATA_REG_SHIFT	= (16),	/* Enable SATA Register Set */
	PCS_EN_PORT_XMT_SHIFT	= (12),	/* Enable Port Transmit */
	PCS_EN_PORT_XMT_SHIFT2	= (8),	/* For 6485 */
	PCS_SATA_RETRY	= (1U << 8),	/* retry ctl FIS on R_ERR */
	PCS_RSP_RX_EN	= (1U << 7),	/* raw response rx */
	PCS_SATA_RETRY_2 = (1U << 6),	/* For 9180 */
	PCS_SELF_CLEAR	= (1U << 5),	/* self-clearing int mode */
	PCS_FIS_RX_EN	= (1U << 4),	/* FIS rx enable */
	PCS_CMD_STOP_ERR = (1U << 3),	/* cmd stop-on-err enable */
	PCS_CMD_RST	= (1U << 1),	/* reset cmd issue */
	PCS_CMD_EN	= (1U << 0),	/* enable cmd issue */

	/* Port n Attached Device Info */
	PORT_DEV_SSP_TRGT	= (1U << 19),
	PORT_DEV_SMP_TRGT	= (1U << 18),
	PORT_DEV_STP_TRGT	= (1U << 17),
	PORT_DEV_SSP_INIT	= (1U << 11),
	PORT_DEV_SMP_INIT	= (1U << 10),
	PORT_DEV_STP_INIT	= (1U << 9),
	PORT_PHY_ID_MASK	= (0xFFU << 24),
	PORT_SSP_TRGT_MASK	= (0x1U << 19),
	PORT_SSP_INIT_MASK	= (0x1U << 11),
	PORT_DEV_TRGT_MASK	= (0x7U << 17),
	PORT_DEV_INIT_MASK	= (0x7U << 9),
	PORT_DEV_TYPE_MASK	= (0x7U << 0),

	/* Port n PHY Status */
	PHY_RDY		= (1U << 2),
	PHY_DW_SYNC	= (1U << 1),
	PHY_OOB_DTCTD	= (1U << 0),

	/* VSR */
	/* PHYMODE 6 (CDB) */
	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
};
296
/* SAS/SATA configuration port registers, aka phy registers.
 * Accessed indirectly: latch the offset via mvs_write_port_cfg_addr(),
 * then read/write through the cfg data register. */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY	= 0x00,	/* info for IDENTIFY frame */
	PHYR_ADDR_LO	= 0x04,	/* my SAS address (low) */
	PHYR_ADDR_HI	= 0x08,	/* my SAS address (high) */
	PHYR_ATT_DEV_INFO = 0x0C,	/* attached device info */
	PHYR_ATT_ADDR_LO = 0x10,	/* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI = 0x14,	/* attached dev SAS addr (high) */
	PHYR_SATA_CTL	= 0x18,	/* SATA control */
	PHYR_PHY_STAT	= 0x1C,	/* PHY status */
	PHYR_SATA_SIG0	= 0x20,	/*port SATA signature FIS(Byte 0-3) */
	PHYR_SATA_SIG1	= 0x24,	/*port SATA signature FIS(Byte 4-7) */
	PHYR_SATA_SIG2	= 0x28,	/*port SATA signature FIS(Byte 8-11) */
	PHYR_SATA_SIG3	= 0x2c,	/*port SATA signature FIS(Byte 12-15) */
	PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
	PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
	PHYR_WIDE_PORT	= 0x38,	/* wide port participating */
	PHYR_CURRENT0	= 0x80,	/* current connection info 0 */
	PHYR_CURRENT1	= 0x84,	/* current connection info 1 */
	PHYR_CURRENT2	= 0x88,	/* current connection info 2 */
	CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
	CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
	CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
	CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
	CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
	CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
	CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
	CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
	CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
	CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
	CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
	CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
	CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
	CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
};
332
/* Indirect "command register port" offsets, accessed through the
 * MVS_CMD_ADDR/MVS_CMD_DATA window (see mvs_cr32()/mvs_cw32()). */
enum sas_cmd_port_registers {
	CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
	CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
	CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
	CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
	CMD_OOB_SPACE	= 0x110, /* OOB space control register */
	CMD_OOB_BURST	= 0x114, /* OOB burst control register */
	CMD_PHY_TIMER	= 0x118, /* PHY timer control register */
	CMD_PHY_CONFIG0	= 0x11c, /* PHY config register 0 */
	CMD_PHY_CONFIG1	= 0x120, /* PHY config register 1 */
	CMD_SAS_CTL0	= 0x124, /* SAS control register 0 */
	CMD_SAS_CTL1	= 0x128, /* SAS control register 1 */
	CMD_SAS_CTL2	= 0x12c, /* SAS control register 2 */
	CMD_SAS_CTL3	= 0x130, /* SAS control register 3 */
	CMD_ID_TEST	= 0x134, /* ID test register */
	CMD_PL_TIMER	= 0x138, /* PL timer register */
	CMD_WD_TIMER	= 0x13c, /* WD timer register */
	CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
	CMD_APP_MEM_CTL	= 0x144, /* Application Memory Control */
	CMD_XOR_MEM_CTL	= 0x148, /* XOR Block Memory Control */
	CMD_DMA_MEM_CTL	= 0x14c, /* DMA Block Memory Control */
	CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
	CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
	CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
	CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
	CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
	CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
	CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
	CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
	CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
	CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
	CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
	CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
	CMD_RESET_COUNT	= 0x188, /* Reset Count */
	CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
	CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
	CMD_PHY_CTL	= 0x194, /* PHY Control and Status */
	CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
	CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
	CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
	CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
	CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
	CMD_HOST_CTL	= 0x1AC, /* Host Control Status */
	CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
	CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
	CMD_PHY_MODE_21	= 0x1B8, /* Phy Mode 21 */
	CMD_SL_MODE0	= 0x1BC, /* SL Mode 0 */
	CMD_SL_MODE1	= 0x1C0, /* SL Mode 1 */
	CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
};
385
/* Flags kept in struct mvs_info::flags. */
enum mvs_info_flags {
	MVF_MSI		= (1U << 0),	/* MSI is enabled */
	MVF_PHY_PWR_FIX	= (1U << 1),	/* bug workaround */
	MVF_FLAG_SOC	= (1U << 2),	/* SoC integrated controllers */
};

/* Hot-plug event classification for a phy. */
enum mvs_event_flags {
	PHY_PLUG_EVENT	= (3U),		/* mask covering both bits below */
	PHY_PLUG_IN	= (1U << 0),	/* phy plug in */
	PHY_PLUG_OUT	= (1U << 1),	/* phy plug out */
};

/* Port role/type bits as detected during OOB. */
enum mvs_port_type {
	PORT_TGT_MASK	= (1U << 5),
	PORT_INIT_PORT	= (1U << 4),
	PORT_TGT_PORT	= (1U << 3),
	PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
	PORT_TYPE_SAS	= (1U << 1),
	PORT_TYPE_SATA	= (1U << 0),
};
406
/* Command Table Format: byte offsets/limits within a slot's command
 * table for each transport protocol. */
enum ct_format {
	/* SSP */
	SSP_F_H		= 0x00,		/* header offset */
	SSP_F_IU	= 0x18,		/* IU offset */
	SSP_F_MAX	= 0x4D,		/* max table size */
	/* STP */
	STP_CMD_FIS	= 0x00,		/* command FIS offset */
	STP_ATAPI_CMD	= 0x40,		/* ATAPI packet offset */
	STP_F_MAX	= 0x10,		/* max table size */
	/* SMP */
	SMP_F_T		= 0x00,		/* request offset */
	SMP_F_DEP	= 0x01,		/* payload offset */
	SMP_F_MAX	= 0x101,	/* max table size */
};

/* Layout of the per-slot status buffer written back by hardware. */
enum status_buffer {
	SB_EIR_OFF	= 0x00,		/* Error Information Record */
	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
	SB_RFB_MAX	= 0x400,	/* RFB size*/
};
428
/* First word of the Error Information Record (see SB_EIR_OFF). */
enum error_info_rec {
	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
	CMD_PI_ERR	= (1U << 30),	/* Protection info error. see flags2 */
	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
	UNK_FIS		= (1U << 27),	/* unknown FIS */
	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
	INTERLOCK	= (1U << 15),	/* interlock error */
	NAK		= (1U << 14),	/* NAK rx'd */
	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
	BREAK		= (1U << 7),	/* break received */
	BAD_DEST	= (1U << 6),	/* bad destination */
	BAD_PROTO	= (1U << 5),	/* protocol not supported */
	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
	WRONG_DEST	= (1U << 3),	/* wrong destination error */
	CREDIT_TO	= (1U << 2),	/* credit timeout */
	WDOG_TO		= (1U << 1),	/* watchdog timeout */
	BUF_PAR		= (1U << 0),	/* buffer parity error */
};

/* Second word of the Error Information Record (protection info). */
enum error_info_rec_2 {
	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
	USR_BLK_NM	= (1U << 0),	/* User Block Number */
};
468
/* Bit fields of the PCI config-space registers (enum pci_cfg_registers):
 * PCTL_*/PRD_* belong to the phy-control word, PLS_* to PCR_LINK_STAT. */
enum pci_cfg_register_bits {
	PCTL_PWR_OFF	= (0xFU << 24),	/* phy power off, one bit per phy */
	PCTL_COM_ON	= (0xFU << 20),	/* common-mode on */
	PCTL_LINK_RST	= (0xFU << 16),	/* link reset */
	PCTL_LINK_OFFS	= (16),
	PCTL_PHY_DSBL	= (0xFU << 12),	/* phy disable */
	PCTL_PHY_DSBL_OFFS	= (12),
	PRD_REQ_SIZE	= (0x4000),
	PRD_REQ_MASK	= (0x00007000),
	PLS_NEG_LINK_WD	= (0x3FU << 4),	/* negotiated PCI-E link width */
	PLS_NEG_LINK_WD_OFFS	= 4,
	PLS_LINK_SPD	= (0x0FU << 0),	/* negotiated PCI-E link speed */
	PLS_LINK_SPD_OFFS	= 0,
};
483
/* Protocol field values for the open address frame. */
enum open_frame_protocol {
	PROTOCOL_SMP	= 0x0,
	PROTOCOL_SSP	= 0x1,
	PROTOCOL_STP	= 0x2,
};

/* define for response frame datapres field */
enum datapres_field {
	NO_DATA		= 0,
	RESPONSE_DATA	= 1,
	SENSE_DATA	= 2,
};

/* define task management IU */
/* NOTE(review): u8 followed by u16 leaves an implicit padding byte;
 * fine as an in-memory IU descriptor, but do not memcpy this struct
 * directly onto the wire without confirming the expected layout. */
struct mvs_tmf_task{
	u8 tmf;				/* task management function code */
	u16 tag_of_task_to_be_managed;	/* tag of the target task */
};
502#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 000000000000..8646a19f999d
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
1/*
2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#include "mv_sas.h"
27
/* SAS transport template registered by mvs_init(). */
static struct scsi_transport_template *mvs_stt;

/* Per-chip parameters, indexed by the chip_* id from the PCI table.
 * Field order matches struct mvs_chip_info; the last entry is the
 * dispatch table of chip-generation-specific operations. */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =	{ 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6440] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6485] =	{ 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
	[chip_9180] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
	[chip_9480] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
};
36
/* Number of SAS cores on SoC variants (not referenced in this file). */
#define SOC_SAS_NUM 2

/* SCSI host template: mostly generic libsas handlers, with driver-local
 * slave_alloc/slave_configure and scan hooks.  can_queue is raised later
 * in mvs_post_sas_ha_init(). */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
63
/* libsas LLDD callbacks implemented by this driver (mv_sas.c). */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_dev_found 	= mvs_dev_found,
	.lldd_dev_gone		= mvs_dev_gone,

	.lldd_execute_task	= mvs_queue_command,
	.lldd_control_phy	= mvs_phy_control,

	.lldd_abort_task	= mvs_abort_task,
	.lldd_abort_task_set	= mvs_abort_task_set,
	.lldd_clear_aca		= mvs_clear_aca,
	.lldd_clear_task_set	= mvs_clear_task_set,
	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
	.lldd_lu_reset 		= mvs_lu_reset,
	.lldd_query_task	= mvs_query_task,

	.lldd_port_formed	= mvs_port_formed,
	.lldd_port_deformed	= mvs_port_deformed,

};
83
/*
 * Initialise one phy's driver state and its libsas asd_sas_phy.
 * Phys beyond the chip's real phy count are created disabled.
 */
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->mvi = mvi;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	/* shost->hostdata holds the sas_ha_struct pointer (see probe). */
	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
	sas_phy->lldd_phy = phy;
}
106
/*
 * Tear down one controller instance: free per-slot DMA buffers, the
 * TX/RX rings, the FIS landing area, command headers, the hotplug trash
 * buffer, register mappings, the host reference and pending work.
 * Safe on a partially constructed mvi — each area is NULL-checked.
 */
static void mvs_free(struct mvs_info *mvi)
{
	int i;
	struct mvs_wq *mwq;
	int slot_nr;

	if (!mvi)
		return;

	/* SoC variants use a smaller slot table. */
	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	/* tags_num was incremented once per successfully allocated slot
	 * buffer in mvs_alloc(), so this frees exactly what was made. */
	for (i = 0; i < mvi->tags_num; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];
		if (slot->buf)
			dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->slot) * slot_nr,
				  mvi->slot, mvi->slot_dma);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (mvi->bulk_buffer)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
#endif

	/* NOTE(review): chip_iounmap is called unconditionally; assumed to
	 * tolerate an instance whose ioremap never succeeded — confirm in
	 * the per-chip dispatch implementations. */
	MVS_CHIP_DISP->chip_iounmap(mvi);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work(&mwq->work_q);
	kfree(mvi);
}
156
#ifdef MVS_USE_TASKLET
struct tasklet_struct	mv_tasklet;

/*
 * Deferred interrupt handling: read each core's interrupt status and
 * dispatch its ISR.  @opaque is the sas_ha_struct set up at probe time.
 */
static void mvs_tasklet(unsigned long opaque)
{
	u32 stat;
	u16 core_nr, i = 0;

	struct mvs_info *mvi;
	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	/* A scheduled tasklet with no controller is a driver bug. */
	BUG_ON(!mvi);

	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
		if (stat)
			MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
	}

}
#endif
183
/*
 * Shared IRQ handler.  Reads status from the first core to decide
 * whether the interrupt is ours; then either kicks the tasklet
 * (MVS_USE_TASKLET) or services every core inline.
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	u32 core_nr, i = 0;
	u32 stat;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		return IRQ_NONE;

	/* NOTE(review): status is sampled from core 0 only; assumed to
	 * reflect all cores — confirm against isr_status implementations. */
	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
	if (!stat)
		return IRQ_NONE;

#ifdef MVS_USE_TASKLET
	tasklet_schedule(&mv_tasklet);
#else
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, irq, stat);
	}
#endif
	return IRQ_HANDLED;
}
211
/*
 * Allocate and initialise per-controller state: phy/port/device tables
 * and all coherent DMA areas (TX ring, RX ring, FIS area, command
 * headers, per-slot buffers).  Returns 0 on success, 1 on failure;
 * the caller is expected to unwind with mvs_free().
 */
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
	int i = 0, slot_nr;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	spin_lock_init(&mvi->lock);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_phy_init(mvi, i);
		mvi->port[i].wide_port_phymap = 0;
		mvi->port[i].port_attached = 0;
		INIT_LIST_HEAD(&mvi->port[i].list);
	}
	for (i = 0; i < MVS_MAX_DEVICES; i++) {
		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
		mvi->devices[i].dev_type = NO_DEVICE;
		mvi->devices[i].device_id = i;
		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
	}

	/*
	 * alloc and init our DMA areas
	 */
	mvi->tx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;
	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

	/* RX ring has one extra entry: slot 0 is the write pointer. */
	mvi->rx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
	/* Start the consumer index at the wrap value. */
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(mvi->dev,
				       sizeof(*mvi->slot) * slot_nr,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;
	memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	/* Scratch buffer DMAed into during hotplug removal. */
	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
				       TRASH_BUCKET_SIZE,
				       &mvi->bulk_buffer_dma, GFP_KERNEL);
	if (!mvi->bulk_buffer)
		goto err_out;
#endif
	for (i = 0; i < slot_nr; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					       &slot->buf_dma, GFP_KERNEL);
		if (!slot->buf) {
			printk(KERN_DEBUG"failed to allocate slot->buf.\n");
			goto err_out;
		}
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
		/* tags_num tracks allocated slots so mvs_free() can unwind. */
		++mvi->tags_num;
	}
	/* Initialize tags */
	mvs_tag_init(mvi);
	return 0;
err_out:
	return 1;
}
291
292
/*
 * Map the controller's main register BAR and, when @bar_ex != -1, the
 * peripheral-register BAR as well.  Returns 0 on success, -1 on error
 * (with any extended mapping already undone).
 */
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
	unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
	struct pci_dev *pdev = mvi->pdev;
	if (bar_ex != -1) {
		/*
		 * ioremap main and peripheral registers
		 */
		res_start = pci_resource_start(pdev, bar_ex);
		res_len = pci_resource_len(pdev, bar_ex);
		if (!res_start || !res_len)
			goto err_out;

		res_flag_ex = pci_resource_flags(pdev, bar_ex);
		if (res_flag_ex & IORESOURCE_MEM) {
			if (res_flag_ex & IORESOURCE_CACHEABLE)
				mvi->regs_ex = ioremap(res_start, res_len);
			else
				mvi->regs_ex = ioremap_nocache(res_start,
						res_len);
		} else
			/* I/O-port BAR: keep the raw port base as a cookie. */
			mvi->regs_ex = (void *)res_start;
		if (!mvi->regs_ex)
			goto err_out;
	}

	res_start = pci_resource_start(pdev, bar);
	res_len = pci_resource_len(pdev, bar);
	if (!res_start || !res_len)
		goto err_out;

	res_flag = pci_resource_flags(pdev, bar);
	if (res_flag & IORESOURCE_CACHEABLE)
		mvi->regs = ioremap(res_start, res_len);
	else
		mvi->regs = ioremap_nocache(res_start, res_len);

	if (!mvi->regs) {
		/* Only iounmap regs_ex if it really was an ioremap. */
		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
			iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
		goto err_out;
	}

	return 0;
err_out:
	return -1;
}
341
/* Thin wrapper so chip dispatch tables can unmap a register region. */
void mvs_iounmap(void __iomem *regs)
{
	iounmap(regs);
}
346
/*
 * Allocate and wire up one mvs_info (core @id) for a PCI controller:
 * driver bookkeeping, chip-specific ioremap and DMA-area allocation.
 * Returns the new instance or NULL on failure (partially built state
 * is released via mvs_free()).
 */
static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
				const struct pci_device_id *ent,
				struct Scsi_Host *shost, unsigned int id)
{
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	/* slot_info[] is a flexible tail sized for MVS_SLOTS entries. */
	mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
			GFP_KERNEL);
	if (!mvi)
		return NULL;

	mvi->pdev = pdev;
	mvi->dev = &pdev->dev;
	mvi->chip_id = ent->driver_data;
	mvi->chip = &mvs_chips[mvi->chip_id];
	INIT_LIST_HEAD(&mvi->wq_list);
	mvi->irq = pdev->irq;

	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

	mvi->id = id;
	mvi->sas = sha;
	mvi->shost = shost;
#ifdef MVS_USE_TASKLET
	tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
#endif

	if (MVS_CHIP_DISP->chip_ioremap(mvi))
		goto err_out;
	/* mvs_alloc() returns 0 on success. */
	if (!mvs_alloc(mvi, shost))
		return mvi;
err_out:
	mvs_free(mvi);
	return NULL;
}
384
/* move to PCI layer or libata core? */
/*
 * Configure DMA masks: try 64-bit streaming DMA (falling back to a
 * 32-bit consistent mask if needed), otherwise set both masks to
 * 32 bits.  Returns 0 on success or the failing call's error code.
 */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			/* 64-bit coherent not available; 32-bit may still be. */
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
417
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
/*
 * Second-stage sas_ha_struct setup, run after every core's mvs_info
 * exists: populate the flat phy/port arrays across all cores and set
 * queue depths.  On exit @mvi points at the last core, whose fields
 * (dev, sas_addr, shost) are shared with the HA.
 */
static void  __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
			const struct mvs_chip_info *chip_info)
{
	int can_queue, i = 0, j = 0;
	struct mvs_info *mvi = NULL;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < nr_core; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < chip_info->n_phy; i++) {
			/* Flat index: core j's phy i lands at j*n_phy + i. */
			sha->sas_phy[j * chip_info->n_phy  + i] =
				&mvi->phy[i].sas_phy;
			sha->sas_port[j * chip_info->n_phy + i] =
				&mvi->port[i].sas_port;
		}
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = mvi->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &mvi->sas_addr[0];

	sha->num_phys = nr_core * chip_info->n_phy;

	sha->lldd_max_execute_num = 1;

	if (mvi->flags & MVF_FLAG_SOC)
		can_queue = MVS_SOC_CAN_QUEUE;
	else
		can_queue = MVS_CAN_QUEUE;

	sha->lldd_queue_size = can_queue;
	shost->can_queue = can_queue;
	mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
	sha->core.shost = mvi->shost;
}
496
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
502 mvi->phy[i].dev_sas_addr =
503 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
508
509static int __devinit mvs_pci_init(struct pci_dev *pdev,
510 const struct pci_device_id *ent)
511{
512 unsigned int rc, nhost = 0;
513 struct mvs_info *mvi;
514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
520 rc = pci_enable_device(pdev);
521 if (rc)
522 goto err_out_enable;
523
524 pci_set_master(pdev);
525
526 rc = pci_request_regions(pdev, DRV_NAME);
527 if (rc)
528 goto err_out_disable;
529
530 rc = pci_go_64(pdev);
531 if (rc)
532 goto err_out_regions;
533
534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
535 if (!shost) {
536 rc = -ENOMEM;
537 goto err_out_regions;
538 }
539
540 chip = &mvs_chips[ent->driver_data];
541 SHOST_TO_SAS_HA(shost) =
542 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
543 if (!SHOST_TO_SAS_HA(shost)) {
544 kfree(shost);
545 rc = -ENOMEM;
546 goto err_out_regions;
547 }
548
549 rc = mvs_prep_sas_ha_init(shost, chip);
550 if (rc) {
551 kfree(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
555
556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
557
558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
579 if (rc)
580 goto err_out_shost;
581
582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
583 if (rc)
584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
589
590 MVS_CHIP_DISP->interrupt_enable(mvi);
591
592 scsi_scan_host(mvi->shost);
593
594 return 0;
595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
598err_out_shost:
599 scsi_remove_host(mvi->shost);
600err_out_regions:
601 pci_release_regions(pdev);
602err_out_disable:
603 pci_disable_device(pdev);
604err_out_enable:
605 return rc;
606}
607
/*
 * PCI remove: unregister from the SAS/SCSI layers, quiesce interrupts,
 * free every core's resources and the shared HA bookkeeping, then
 * release the PCI device.
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	unsigned short core_nr, i = 0;
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct mvs_info *mvi = NULL;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef MVS_USE_TASKLET
	tasklet_kill(&mv_tasklet);
#endif

	pci_set_drvdata(pdev, NULL);
	sas_unregister_ha(sha);
	sas_remove_host(mvi->shost);
	scsi_remove_host(mvi->shost);

	/* Disable interrupts on core 0 before giving the IRQ back. */
	MVS_CHIP_DISP->interrupt_disable(mvi);
	free_irq(mvi->irq, sha);
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		mvs_free(mvi);
	}
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	return;
}
639
/* PCI IDs handled by this driver.  The explicit 0x6440/0x6480 entry
 * matches a subdevice-specific 6485 variant and must precede the
 * generic 0x6440 line so it wins the match. */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		.vendor 	= PCI_VENDOR_ID_MARVELL,
		.device 	= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6485,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
	{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },

	{ }	/* terminate list */
};
659
/* PCI driver glue: probe/remove entry points and the ID table above. */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
666
667/* task handler */
668struct task_struct *mvs_th;
669static int __init mvs_init(void)
670{
671 int rc;
672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
673 if (!mvs_stt)
674 return -ENOMEM;
675
676 rc = pci_register_driver(&mvs_pci_driver);
677
678 if (rc)
679 goto err_out;
680
681 return 0;
682
683err_out:
684 sas_release_transport(mvs_stt);
685 return rc;
686}
687
/* Module exit: unregister the PCI driver and detach the transport. */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
693
module_init(mvs_init);
module_exit(mvs_exit);

/* Module metadata; the device table enables automatic module loading. */
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 000000000000..0d2138641214
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26
27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
28{
29 if (task->lldd_task) {
30 struct mvs_slot_info *slot;
31 slot = task->lldd_task;
32 *tag = slot->slot_tag;
33 return 1;
34 }
35 return 0;
36}
37
/* Mark @tag as free in the controller's tag bitmap. */
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = &mvi->tags;
	clear_bit(tag, bitmap);
}
43
/* Alias of mvs_tag_clear(), kept for readability at call sites. */
void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}
48
/* Mark @tag as in use in the controller's tag bitmap. */
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = &mvi->tags;
	set_bit(tag, bitmap);
}
54
55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
56{
57 unsigned int index, tag;
58 void *bitmap = &mvi->tags;
59
60 index = find_first_zero_bit(bitmap, mvi->tags_num);
61 tag = index;
62 if (tag >= mvi->tags_num)
63 return -SAS_QUEUE_FULL;
64 mvs_tag_set(mvi, tag);
65 *tag_out = tag;
66 return 0;
67}
68
/* Reset the tag bitmap: every tag starts out free. */
void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}
75
/*
 * Debug hex dump of @size bytes at @data, 16 per row, each row prefixed
 * with @baseaddr + offset and followed by a printable-ASCII column.
 * NOTE(review): one printk per fragment relies on console continuation
 * behaviour; rows may interleave with other printk output.
 */
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;
	u32 offset;

	offset = 0;
	while (size) {
		printk(KERN_DEBUG"%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		/* Hex column, padded so the ASCII column lines up. */
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk(KERN_DEBUG"%02X ", (u32)data[i]);
			else
				printk(KERN_DEBUG"   ");
		}
		printk(KERN_DEBUG": ");
		for (i = 0; i < run; i++)
			printk(KERN_DEBUG"%c",
				isalnum(data[i]) ? data[i] : '.');
		printk(KERN_DEBUG"\n");
		data = &data[16];
		offset += run;
	}
	printk(KERN_DEBUG"\n");
}
106
#if (_MV_DUMP > 1)
/* Dump the first 32 bytes of a slot's status buffer (debug builds).
 * The offset places it after the command table, OAF and PRD regions. */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
		   enum sas_protocol proto)
{
	u32 offset;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
	    MVS_CHIP_DISP->prd_size() * slot->n_elem;
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif
122
/*
 * Debug dump of all per-command hardware structures for slot @tag:
 * delivery-queue entry, command header, command table, open address
 * frame, status buffer and PRD table.  Compiled away unless
 * _MV_DUMP > 1.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if (_MV_DUMP > 1)
	u32 sz, w_ptr;
	u64 addr;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = MVS_CHIP_SLOT_SZ;
	w_ptr = slot->tx;
	addr = mvi->tx_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, (unsigned long long)mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, (unsigned long long)slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
	mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
169
/*
 * Debug dump of the next completion-queue entry.  Compiled away unless
 * _MV_DUMP > 2.  The double << 16 assembles a 64-bit address from two
 * 32-bit registers without shifting a u32 by 32 (which would be UB).
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
190
/* Intentionally a no-op: the fixed-address fallback is kept here only
 * as a commented-out reference. */
void mvs_get_sas_addr(void *buf, u32 buflen)
{
	/*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
}
195
/*
 * Map a domain_device to the mvs_info (core) owning it: locate the
 * device's port in the HA's port array, take the port's first phy,
 * find that phy's flat index, and divide by phys-per-core.
 * NOTE(review): if the port/phy is never matched, j stays 0 and core 0
 * is returned — presumably can't happen for a valid device; confirm.
 */
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy =  container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;

}
223
/* FIXME */
/*
 * Fill @phyno with the per-core phy numbers belonging to @dev's port
 * (flat HA indices folded back into 0..n_phy-1).  Returns how many
 * entries were written.
 * NOTE(review): the fold only subtracts n_phy once, so it looks correct
 * for at most two cores — confirm against multi-core configurations.
 */
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}
254
/* Release @dev's hardware register set, but only when the device has no
 * outstanding requests and actually owns a set. */
static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been free.\n");
		return;
	}
	/* Still-running requests keep the register set alive. */
	if (dev->runing_req != 0)
		return;
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}
268
/* Assign a hardware register set to @dev if it does not already own
 * one.  Returns 0 when already mapped, else the chip dispatch result. */
static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
276
/* Reset every phy whose bit is set in @phy_mask; @hard selects hard vs
 * link reset in the chip dispatch. */
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}
286
/* FIXME: locking? */
/*
 * libsas phy-control entry point.  Resolves which core owns @sas_phy by
 * scanning the HA's flat phy array, then applies @func via the chip
 * dispatch.  Returns 0 on success or -EOPNOTSUPP for unsupported ops.
 */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	/* Flat index / phys-per-core = owning core. */
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		/* Skip if a hard reset is already asserted. */
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	/* Give the phy time to settle before the caller proceeds. */
	msleep(200);
	return rc;
}
331
/* Write a 64-bit SAS address into a port's config space as two 32-bit
 * halves at @off_lo / @off_hi. */
void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
343
/*
 * Report phy @i's received identify/signature data to libsas: notify
 * OOB completion, refresh the sas_phy link-rate fields, fill the
 * identify frame for SAS targets, and signal PORTE_BYTES_DMAED.
 * Bails out for unattached phys and for SAS phys without target bits.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}
	mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}
387
/*
 * slave_alloc hook: SATA devices expose only LUN 0, so reject higher
 * LUNs up front (avoids rescans when REPORT_LUNS fails) and mark them
 * tagged-capable; then defer to libsas.
 */
int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if REPORT_LUNS request is failed
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}
402
/*
 * slave_configure hook: run the libsas default, then optionally force
 * NCQ off for SATA devices when built with MV_DISABLE_NCQ.
 */
int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
		/* may set PIO mode */
	#if MV_DISABLE_NCQ
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;
		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	#endif
	}
	return 0;
}
421
/* scan_start hook: report every core's phy identify data to libsas so
 * discovery can begin. */
void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
}
437
/* scan_finished hook: wait at least one second for phy-up interrupts,
 * then flush discovery work and report the scan complete. */
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}
448
/*
 * mvs_task_prep_smp - build a hardware command slot for an SMP request.
 * @mvi: controller instance
 * @tei: execution info (task, command header, slot tag, PRD count)
 *
 * DMA-maps the single-entry SMP request/response scatterlists, carves
 * the per-slot DMA buffer into the regions the hardware expects
 * (command table, open address frame, PRD table, status buffer), then
 * fills in the command header and the TX (delivery) ring entry.
 *
 * Returns 0 on success or a negative errno; on error every DMA mapping
 * taken here is unwound.  Caller holds mvi->lock (see mvs_task_exec()).
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* debug build: stage the command in the slot buffer so it can be
	 * dumped later; otherwise point the hardware at the mapped sg */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	/* lengths in dwords; "req_len - 4" presumably drops a trailing CRC
	 * dword appended by hardware -- TODO confirm against chip docs */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
573
574static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
575{
576 struct ata_queued_cmd *qc = task->uldd_task;
577
578 if (qc) {
579 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
580 qc->tf.command == ATA_CMD_FPDMA_READ) {
581 *tag = qc->tag;
582 return 1;
583 }
584 }
585
586 return 0;
587}
588
/*
 * mvs_task_prep_ata - build a hardware command slot for a SATA/STP task.
 * @mvi: controller instance
 * @tei: execution info (task, command header, slot tag, PRD count)
 *
 * Claims a SATA register set for the device, writes the TX (delivery)
 * ring entry, then lays out the slot buffer (command FIS + optional
 * ATAPI CDB, open address frame, PRD table, status buffer) and fills
 * in the command header.  Returns 0 on success or -EBUSY when no
 * register set is free.  Caller holds mvi->lock (see mvs_task_exec()).
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Have not enough regiset for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	/* delivery queue entry: mode, tag, command type, phy mask and the
	 * SATA register set assigned above */
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	/* reads advertise the full PRD count so the trailing trash-bucket
	 * entry added by dma_fix() below is covered -- hotplug workaround */
	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#else
	flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#endif
	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	/* NCQ: the queue tag rides in bits 7:3 of the FIS sector count */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP. unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	/* reserve room for the maximum PRD count, not just n_elem, so the
	 * hotplug dma_fix() entry fits */
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA. kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
#endif
	return 0;
}
726
/*
 * mvs_task_prep_ssp - build a hardware command slot for an SSP command
 * or task-management frame.
 * @mvi:    controller instance
 * @tei:    execution info (task, command header, port, tag, PRD count)
 * @is_tmf: nonzero when building a TASK frame instead of a COMMAND frame
 * @tmf:    TMF descriptor used when @is_tmf is set
 *
 * Writes the TX (delivery) ring entry, lays out the slot buffer (SSP
 * frame, open address frame, PRD table, status buffer) and fills in the
 * command header and the SSP frame/IU.  Always returns 0.  Caller holds
 * mvi->lock (see mvs_task_exec()).
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			struct mvs_task_exec_info *tei, int is_tmf,
			struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port membership map when one exists */
	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
		sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	/* SSP frame header plus a 28-byte COMMAND/TASK IU */
	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else{
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* these TMFs carry the tag of the task to manage */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
867
868#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
869static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
870 struct completion *completion,int is_tmf,
871 struct mvs_tmf_task *tmf)
872{
873 struct domain_device *dev = task->dev;
874 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
875 struct mvs_info *mvi = mvi_dev->mvi_info;
876 struct mvs_task_exec_info tei;
877 struct sas_task *t = task;
878 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0;
881 unsigned long flags = 0;
882
883 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status;
885
886 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t);
889 return 0;
890 }
891
892 spin_lock_irqsave(&mvi->lock, flags);
893 do {
894 dev = t->dev;
895 mvi_dev = dev->lldd_dev;
896 if (DEV_IS_GONE(mvi_dev)) {
897 if (mvi_dev)
898 mv_dprintk("device %d not ready.\n",
899 mvi_dev->device_id);
900 else
901 mv_dprintk("device %016llx not ready.\n",
902 SAS_ADDR(dev->sas_addr));
903
904 rc = SAS_PHY_DOWN;
905 goto out_done;
906 }
907
908 if (dev->port->id >= mvi->chip->n_phy)
909 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
910 else
911 tei.port = &mvi->port[dev->port->id];
912
913 if (!tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) {
915 mv_dprintk("port %d does not"
916 "attached device.\n", dev->port->id);
917 rc = SAS_PHY_DOWN;
918 goto out_done;
919 } else {
920 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED;
922 ts->stat = SAS_PHY_DOWN;
923 t->task_done(t);
924 if (n > 1)
925 t = list_entry(t->list.next,
926 struct sas_task, list);
927 continue;
928 }
929 }
930
931 if (!sas_protocol_ata(t->task_proto)) {
932 if (t->num_scatter) {
933 n_elem = dma_map_sg(mvi->dev,
934 t->scatter,
935 t->num_scatter,
936 t->data_dir);
937 if (!n_elem) {
938 rc = -ENOMEM;
939 goto err_out;
940 }
941 }
942 } else {
943 n_elem = t->num_scatter;
944 }
945
946 rc = mvs_tag_alloc(mvi, &tag);
947 if (rc)
948 goto err_out;
949
950 slot = &mvi->slot_info[tag];
951
952
953 t->lldd_task = NULL;
954 slot->n_elem = n_elem;
955 slot->slot_tag = tag;
956 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
957
958 tei.task = t;
959 tei.hdr = &mvi->slot[tag];
960 tei.tag = tag;
961 tei.n_elem = n_elem;
962 switch (t->task_proto) {
963 case SAS_PROTOCOL_SMP:
964 rc = mvs_task_prep_smp(mvi, &tei);
965 break;
966 case SAS_PROTOCOL_SSP:
967 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
968 break;
969 case SAS_PROTOCOL_SATA:
970 case SAS_PROTOCOL_STP:
971 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
972 rc = mvs_task_prep_ata(mvi, &tei);
973 break;
974 default:
975 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n",
977 t->task_proto);
978 rc = -EINVAL;
979 break;
980 }
981
982 if (rc) {
983 mv_dprintk("rc is %x\n", rc);
984 goto err_out_tag;
985 }
986 slot->task = t;
987 slot->port = tei.port;
988 t->lldd_task = slot;
989 list_add_tail(&slot->entry, &tei.port->list);
990 /* TODO: select normal or high priority */
991 spin_lock(&t->task_state_lock);
992 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
993 spin_unlock(&t->task_state_lock);
994
995 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++;
997 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list);
1001 } while (--n);
1002 rc = 0;
1003 goto out_done;
1004
1005err_out_tag:
1006 mvs_tag_free(mvi, tag);
1007err_out:
1008
1009 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1010 if (!sas_protocol_ata(t->task_proto))
1011 if (n_elem)
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir);
1014out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc;
1021}
1022
1023int mvs_queue_command(struct sas_task *task, const int num,
1024 gfp_t gfp_flags)
1025{
1026 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1027}
1028
1029static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1030{
1031 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1032 mvs_tag_clear(mvi, slot_idx);
1033}
1034
/*
 * mvs_slot_task_free - detach a completed/aborted task from its slot.
 *
 * Undoes the scatterlist mappings taken at submission time (SSP/SMP
 * data, plus the SMP request/response pair), unlinks the slot from its
 * port list, clears the task<->slot cross references and returns the
 * slot tag to the free pool.  A slot already freed (slot->task == NULL)
 * is left untouched.
 *
 * NOTE(review): call sites appear to run under mvi->lock -- confirm.
 */
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	/* ATA scatterlists are owned (and unmapped) by libata */
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP mapped its req/resp buffers in mvs_task_prep_smp() */
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}
1067
/*
 * mvs_update_wideport - push phy @i's wide-port membership to hardware.
 *
 * Walks the owning port's wide_port_phymap and writes the map into the
 * PHYR_WIDE_PORT config register of member phys, clearing the register
 * for non-members.
 *
 * NOTE(review): the (j & 1) test presumably checks membership with
 * for_each_phy() shifting the map through j -- confirm against the
 * for_each_phy() macro definition.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}
1088
/*
 * mvs_is_phy_ready - sample phy readiness and tear down stale state.
 *
 * Returns the raw phy control register value (nonzero) when the phy is
 * ready and no power-off event (PHYEV_POOF) is pending; a phy without a
 * port yet is marked attached as a side effect.
 *
 * Otherwise returns 0 and, if the phy belonged to a port, detaches it:
 * SAS phys are removed from the wide-port map (detaching the port when
 * the map empties), SATA ports are detached outright.
 */
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	/* phy not ready (or just lost power): unwind its port binding */
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
1116
/*
 * mvs_get_d2h_reg - read back the received SATA signature FIS for phy @i.
 *
 * Fills @buf (viewed as four u32s, s[0]..s[3]) from the per-phy
 * PHYR_SATA_SIG0..SIG3 config registers and returns @buf, or NULL when
 * @buf is NULL.
 */
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	/* Workaround: take some ATAPI devices for ATA
	 * (0x..EB1401 looks like the ATAPI signature -- presumed) */
	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}
1142
1143static u32 mvs_is_sig_fis_received(u32 irq_status)
1144{
1145 return irq_status & PHYEV_SIG_FIS;
1146}
1147
/*
 * mvs_update_phyinfo - refresh phy state after a phy event/discovery.
 * @mvi:    controller instance
 * @i:      phy index on this controller
 * @get_st: when set, (re)read interrupt status and readiness from the
 *          hardware, and acknowledge the status at the end.
 *
 * For a ready phy, fixes up the received identify frame through the chip
 * dispatcher and classifies the attachment: SATA phys count as attached
 * only once the signature FIS arrived (otherwise the sig-FIS interrupt
 * is unmasked and the phy reset); SAS phys derive device type/protocol
 * from the identify data.  The attached SAS address is then propagated
 * to the libsas asd_sas_phy.
 */
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->phy_attached = 1;
				/* direct-attached SATA has no SAS address:
				 * synthesize one from the global phy index */
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				/* unmask the sig-FIS event so we retry when
				 * it eventually arrives */
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				MVS_CHIP_DISP->phy_reset(mvi, i, 0);
				goto out_done;
			}
		}	else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("port %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("port %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	/* acknowledge the interrupt status we consumed above */
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}
1221
/*
 * mvs_port_notify_formed - bind a freshly formed libsas port to its
 * mvs_port and program wide-port membership.
 * @sas_phy: phy on which the port formed
 * @lock:    take mvi->lock around the update when nonzero
 *
 * Locates the owning controller by scanning sas_ha->sas_phy[] for the
 * phy's global index, maps the libsas port id onto this controller's
 * port array, marks the port attached, and for SAS phys pushes the new
 * phy-membership mask to the hardware.
 */
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL; int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;
	if (!sas_port)
		return;

	/* find the global index of this phy within the HA's phy table */
	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	/* hi selects which controller (mvi) the phy belongs to */
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (sas_port->id >= mvi->chip->n_phy)
		port = &mvi->port[sas_port->id - mvi->chip->n_phy];
	else
		port = &mvi->port[sas_port->id];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}
1256
/* Port deformation needs no LLDD-side teardown; intentionally empty. */
static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
}
1261
1262
/* libsas hook: a port formed on @sas_phy -- bind it, taking the lock. */
void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}
1267
/* libsas hook: a port on @sas_phy deformed -- currently a no-op path. */
void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}
1272
1273struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1274{
1275 u32 dev;
1276 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1277 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1278 mvi->devices[dev].device_id = dev;
1279 return &mvi->devices[dev];
1280 }
1281 }
1282
1283 if (dev == MVS_MAX_DEVICES)
1284 mv_printk("max support %d devices, ignore ..\n",
1285 MVS_MAX_DEVICES);
1286
1287 return NULL;
1288}
1289
1290void mvs_free_dev(struct mvs_device *mvi_dev)
1291{
1292 u32 id = mvi_dev->device_id;
1293 memset(mvi_dev, 0, sizeof(*mvi_dev));
1294 mvi_dev->device_id = id;
1295 mvi_dev->dev_type = NO_DEVICE;
1296 mvi_dev->dev_status = MVS_DEV_NORMAL;
1297 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1298}
1299
/*
 * mvs_dev_found_notify - allocate per-device LLDD state for a new
 * domain device.
 * @dev:  the libsas domain device
 * @lock: take mvi->lock during allocation when nonzero
 *
 * Binds a free mvs_device slot to @dev.  For devices behind an
 * expander, also records which expander phy leads to the device.
 * Returns 0 on success, -1 when no slot is free or the expander does
 * not report the device's SAS address on any phy.
 */
int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		/* find which expander phy the device hangs off */
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}
1348
/* libsas hook: new device discovered -- allocate state under the lock. */
int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}
1353
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1355{
1356 unsigned long flags = 0;
1357 struct mvs_device *mvi_dev = dev->lldd_dev;
1358 struct mvs_info *mvi = mvi_dev->mvi_info;
1359
1360 if (lock)
1361 spin_lock_irqsave(&mvi->lock, flags);
1362
1363 if (mvi_dev) {
1364 mv_dprintk("found dev[%d:%x] is gone.\n",
1365 mvi_dev->device_id, mvi_dev->dev_type);
1366 mvs_free_reg_set(mvi, mvi_dev);
1367 mvs_free_dev(mvi_dev);
1368 } else {
1369 mv_dprintk("found dev has gone.\n");
1370 }
1371 dev->lldd_dev = NULL;
1372
1373 if (lock)
1374 spin_unlock_irqrestore(&mvi->lock, flags);
1375}
1376
1377
/* libsas hook: device departed -- tear down LLDD state under the lock. */
void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev, 1);
}
1382
1383static struct sas_task *mvs_alloc_task(void)
1384{
1385 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1386
1387 if (task) {
1388 INIT_LIST_HEAD(&task->list);
1389 spin_lock_init(&task->task_state_lock);
1390 task->task_state_flags = SAS_TASK_STATE_PENDING;
1391 init_timer(&task->timer);
1392 init_completion(&task->completion);
1393 }
1394 return task;
1395}
1396
1397static void mvs_free_task(struct sas_task *task)
1398{
1399 if (task) {
1400 BUG_ON(!list_empty(&task->list));
1401 kfree(task);
1402 }
1403}
1404
1405static void mvs_task_done(struct sas_task *task)
1406{
1407 if (!del_timer(&task->timer))
1408 return;
1409 complete(&task->completion);
1410}
1411
1412static void mvs_tmf_timedout(unsigned long data)
1413{
1414 struct sas_task *task = (struct sas_task *)data;
1415
1416 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1417 complete(&task->completion);
1418}
1419
/* XXX */
#define MVS_TASK_TIMEOUT 20
/*
 * mvs_exec_internal_tmf_task - synchronously run an internally built
 * task (typically a TMF), with a timeout and up to three retries.
 * @dev:       target device
 * @parameter: SSP task IU to copy into the new task (e.g. sas_ssp_task)
 * @para_len:  size of @parameter in bytes
 * @tmf:       task-management descriptor passed to mvs_task_exec()
 *
 * Arms a MVS_TASK_TIMEOUT-second timer whose expiry marks the task
 * aborted and completes it; mvs_task_done() completes it on normal
 * completion only if it beats the timer (see del_timer() there).
 *
 * Returns TMF_RESP_FUNC_COMPLETE on SAM_GOOD, the residual byte count
 * on DATA_UNDERRUN, -EMSGSIZE on DATA_OVERRUN, -TMF_RESP_FUNC_FAILED on
 * timeout or exhausted retries; any other response is retried.
 */
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = mvs_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internel task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = -TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			/* unexpected response: free the task and retry */
			mv_dprintk(" task to dev %016llx response: 0x%x "
				    "status 0x%x\n",
				    SAS_ADDR(dev->sas_addr),
				    task->task_status.resp,
				    task->task_status.stat);
			mvs_free_task(task);
			task = NULL;

		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	if (task != NULL)
		mvs_free_task(task);
	return res;
}
1498
1499static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1500 u8 *lun, struct mvs_tmf_task *tmf)
1501{
1502 struct sas_ssp_task ssp_task;
1503 DECLARE_COMPLETION_ONSTACK(completion);
1504 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1505 return TMF_RESP_FUNC_ESUPP;
1506
1507 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1508
1509 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1510 sizeof(ssp_task), tmf);
1511}
1512
1513
1514/* Standard mandates link reset for ATA (type 0)
1515 and hard reset for SSP (type 1) , only for RECOVERY */
1516static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1517{
1518 int rc;
1519 struct sas_phy *phy = sas_find_local_phy(dev);
1520 int reset_type = (dev->dev_type == SATA_DEV ||
1521 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1522 rc = sas_phy_reset(phy, reset_type);
1523 msleep(2000);
1524 return rc;
1525}
1526
/* mandatory SAM-3 */
/*
 * mvs_lu_reset - LOGICAL UNIT RESET task management.
 *
 * Marks the device as being in error handling, issues TMF_LU_RESET via
 * an internal SSP task and, on success, releases every slot the device
 * still holds on each of its phys.  Returns the TMF result; on failure
 * the caller (libsas EH) escalates to an I_T nexus reset.
 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device * mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		/* reset succeeded: drop everything still queued on the LU */
		num = mvs_find_dev_phyno(dev, phyno);
		spin_lock_irqsave(&mvi->lock, flags);
		for (i = 0; i < num; i++)
			mvs_release_task(mvi, phyno[i], dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall-through I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}
1551
/*
 * mvs_I_T_nexus_reset - reset the initiator-target nexus (EH escalation).
 *
 * Does nothing unless the device was already put into error-handling
 * state (MVS_DEV_EH).  Performs a link or hard reset as appropriate via
 * mvs_debug_I_T_nexus_reset(), then releases any tasks still held on
 * the device's phys.  Returns the reset result.
 */
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	/* housekeeper */
	num = mvs_find_dev_phyno(dev, phyno);
	spin_lock_irqsave(&mvi->lock, flags);
	for (i = 0; i < num; i++)
		mvs_release_task(mvi, phyno[i], dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}
1574/* optional SAM-3 */
1575int mvs_query_task(struct sas_task *task)
1576{
1577 u32 tag;
1578 struct scsi_lun lun;
1579 struct mvs_tmf_task tmf_task;
1580 int rc = TMF_RESP_FUNC_FAILED;
1581
1582 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1583 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1584 struct domain_device *dev = task->dev;
1585 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1586 struct mvs_info *mvi = mvi_dev->mvi_info;
1587
1588 int_to_scsilun(cmnd->device->lun, &lun);
1589 rc = mvs_find_tag(mvi, task, &tag);
1590 if (rc == 0) {
1591 rc = TMF_RESP_FUNC_FAILED;
1592 return rc;
1593 }
1594
1595 tmf_task.tmf = TMF_QUERY_TASK;
1596 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1597
1598 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1599 switch (rc) {
1600 /* The task is still in Lun, release it then */
1601 case TMF_RESP_FUNC_SUCC:
1602 /* The task is not in Lun or failed, reset the phy */
1603 case TMF_RESP_FUNC_FAILED:
1604 case TMF_RESP_FUNC_COMPLETE:
1605 break;
1606 }
1607 }
1608 mv_printk("%s:rc= %d\n", __func__, rc);
1609 return rc;
1610}
1611
1612/* mandatory SAM-3, still need free task/slot info */
1613int mvs_abort_task(struct sas_task *task)
1614{
1615 struct scsi_lun lun;
1616 struct mvs_tmf_task tmf_task;
1617 struct domain_device *dev = task->dev;
1618 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1619 struct mvs_info *mvi = mvi_dev->mvi_info;
1620 int rc = TMF_RESP_FUNC_FAILED;
1621 unsigned long flags;
1622 u32 tag;
1623
1624 if (mvi->exp_req)
1625 mvi->exp_req--;
1626 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags);
1629 rc = TMF_RESP_FUNC_COMPLETE;
1630 goto out;
1631 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags);
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1635
1636 int_to_scsilun(cmnd->device->lun, &lun);
1637 rc = mvs_find_tag(mvi, task, &tag);
1638 if (rc == 0) {
1639 mv_printk("No such tag in %s\n", __func__);
1640 rc = TMF_RESP_FUNC_FAILED;
1641 return rc;
1642 }
1643
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1646
1647 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1648
1649 /* if successful, clear the task and callback forwards.*/
1650 if (rc == TMF_RESP_FUNC_COMPLETE) {
1651 u32 slot_no;
1652 struct mvs_slot_info *slot;
1653
1654 if (task->lldd_task) {
1655 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info);
1657 mvs_slot_complete(mvi, slot_no, 1);
1658 }
1659 }
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) {
1662 /* to do free register_set */
1663 } else {
1664 /* SMP */
1665
1666 }
1667out:
1668 if (rc != TMF_RESP_FUNC_COMPLETE)
1669 mv_printk("%s:rc= %d\n", __func__, rc);
1670 return rc;
1671}
1672
1673int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1674{
1675 int rc = TMF_RESP_FUNC_FAILED;
1676 struct mvs_tmf_task tmf_task;
1677
1678 tmf_task.tmf = TMF_ABORT_TASK_SET;
1679 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1680
1681 return rc;
1682}
1683
1684int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1685{
1686 int rc = TMF_RESP_FUNC_FAILED;
1687 struct mvs_tmf_task tmf_task;
1688
1689 tmf_task.tmf = TMF_CLEAR_ACA;
1690 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1691
1692 return rc;
1693}
1694
1695int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1696{
1697 int rc = TMF_RESP_FUNC_FAILED;
1698 struct mvs_tmf_task tmf_task;
1699
1700 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1701 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1702
1703 return rc;
1704}
1705
1706static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1707 u32 slot_idx, int err)
1708{
1709 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1710 struct task_status_struct *tstat = &task->task_status;
1711 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1712 int stat = SAM_GOOD;
1713
1714
1715 resp->frame_len = sizeof(struct dev_to_host_fis);
1716 memcpy(&resp->ending_fis[0],
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err))
1721 stat = SAS_PROTO_RESPONSE;
1722 return stat;
1723}
1724
/* Decode the error-info record of a failed slot and map it to a SAS/SAM
 * status for libsas.  Also restarts command delivery on the chip so the
 * engine makes forward progress after the error. */
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	/* first dword of the slot's response buffer carries error flags */
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	/* chip stopped issuing commands: tell it to resume */
	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_CHECK_COND;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		stat = SAS_ABORTED_TASK;
		break;
	case SAS_PROTOCOL_SMP:
		stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		/* NOTE(review): 0x80400002 is an otherwise-undocumented
		 * error code observed in the field -- meaning unconfirmed */
		if (err_dw0 == 0x80400002)
			mv_printk("find reserved error, why?\n");

		/* drop back to non-NCQ and hand libsas the captured FIS */
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, 1);

	}
		break;
	default:
		break;
	}

	return stat;
}
1767
/*
 * Complete the command occupying the slot named by @rx_desc.
 * @rx_desc: RX completion descriptor (slot index in the low bits plus
 *           RXQ_* status flags).
 * @flags:   non-zero when the caller is force-completing (abort/release
 *           paths) -- the command is then failed with SAS_PHY_DOWN.
 *
 * Fills in task->task_status per protocol, frees the slot and, for ATA,
 * the register set, then invokes task->task_done().  Caller must hold
 * mvi->lock; it is dropped around the task_done() callback (see below).
 * Returns the resulting status, or -1 for empty/aborted slots.
 */
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;

	bool aborted;
	void *to;
	enum exec_status sts;

	if (mvi->exp_req)
		mvi->exp_req--;
	/* slot already torn down (e.g. raced with an abort): nothing to do */
	if (unlikely(!task || !task->lldd_task))
		return -1;

	tstat = &task->task_status;
	mvi_dev = task->dev->lldd_dev;

	mvs_hba_cq_dump(mvi);

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition: libsas may have flagged an abort concurrently */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	/* aborted tasks: free resources but do NOT call task_done() */
	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev)
			mvi_dev->runing_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
		mv_dprintk("port has not device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
		mv_dprintk("Find device[%016llx] RXQ_ERR %X,
		err info:%016llx\n",
		SAS_ADDR(task->dev->sas_addr),
		rx_desc, (u64)(*(u64 *) slot->response));
	}
	*/

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			/* copy the SMP response out of the slot buffer into
			 * the caller's response scatterlist page */
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
					sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	if (mvi_dev) {
		mvi_dev->runing_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	/* snapshot stat: tstat lives in the task, which task_done() below
	 * may recycle */
	sts = tstat->stat;

	/* drop mvi->lock for the upper-layer callback (it may re-enter the
	 * driver); reacquire before returning to the caller */
	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	else
		mv_dprintk("why has not task_done.\n");
	spin_lock(&mvi->lock);

	return sts;
}
1890
1891void mvs_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev)
1893{
1894 int i = 0; u32 slot_idx;
1895 struct mvs_phy *phy;
1896 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2;
1898
1899 phy = &mvi->phy[phy_no];
1900 port = phy->port;
1901 if (!port)
1902 return;
1903
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task;
1906 slot_idx = (u32) (slot - mvi->slot_info);
1907 task = slot->task;
1908
1909 if (dev && task->dev != dev)
1910 continue;
1911
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task);
1914
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921
1922 mvs_slot_complete(mvi, slot_idx, 1);
1923 }
1924}
1925
1926static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{
1928 phy->phy_attached = 0;
1929 phy->att_dev_info = 0;
1930 phy->att_dev_sas_addr = 0;
1931}
1932
1933static void mvs_work_queue(struct work_struct *work)
1934{
1935 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1936 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1937 struct mvs_info *mvi = mwq->mvi;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&mvi->lock, flags);
1941 if (mwq->handler & PHY_PLUG_EVENT) {
1942 u32 phy_no = (unsigned long) mwq->data;
1943 struct sas_ha_struct *sas_ha = mvi->sas;
1944 struct mvs_phy *phy = &mvi->phy[phy_no];
1945 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1946
1947 if (phy->phy_event & PHY_PLUG_OUT) {
1948 u32 tmp;
1949 struct sas_identify_frame *id;
1950 id = (struct sas_identify_frame *)phy->frame_rcvd;
1951 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1952 phy->phy_event &= ~PHY_PLUG_OUT;
1953 if (!(tmp & PHY_READY_MASK)) {
1954 sas_phy_disconnected(sas_phy);
1955 mvs_phy_disconnected(phy);
1956 sas_ha->notify_phy_event(sas_phy,
1957 PHYE_LOSS_OF_SIGNAL);
1958 mv_dprintk("phy%d Removed Device\n", phy_no);
1959 } else {
1960 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1961 mvs_update_phyinfo(mvi, phy_no, 1);
1962 mvs_bytes_dmaed(mvi, phy_no);
1963 mvs_port_notify_formed(sas_phy, 0);
1964 mv_dprintk("phy%d Attached Device\n", phy_no);
1965 }
1966 }
1967 }
1968 list_del(&mwq->entry);
1969 spin_unlock_irqrestore(&mvi->lock, flags);
1970 kfree(mwq);
1971}
1972
1973static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1974{
1975 struct mvs_wq *mwq;
1976 int ret = 0;
1977
1978 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1979 if (mwq) {
1980 mwq->mvi = mvi;
1981 mwq->data = data;
1982 mwq->handler = handler;
1983 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1984 list_add_tail(&mwq->entry, &mvi->wq_list);
1985 schedule_delayed_work(&mwq->work_q, HZ * 2);
1986 } else
1987 ret = -ENOMEM;
1988
1989 return ret;
1990}
1991
1992static void mvs_sig_time_out(unsigned long tphy)
1993{
1994 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1995 struct mvs_info *mvi = phy->mvi;
1996 u8 phy_no;
1997
1998 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
1999 if (&mvi->phy[phy_no] == phy) {
2000 mv_dprintk("Get signature time out, reset phy %d\n",
2001 phy_no+mvi->id*mvi->chip->n_phy);
2002 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2003 }
2004 }
2005}
2006
2007static void mvs_sig_remove_timer(struct mvs_phy *phy)
2008{
2009 if (phy->timer.function)
2010 del_timer(&phy->timer);
2011 phy->timer.function = NULL;
2012}
2013
/*
 * Per-phy port interrupt handler.  Reads the phy's interrupt status and
 * reacts to: STP decode errors, hot-unplug (POOF), COMWAKE, signature-
 * FIS / identify-done, and broadcast-change events.  Handled status
 * bits are written back (cleared) on exit.  @events is the port-level
 * event word; the per-port status is re-read here regardless.
 */
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * events is port event now ,
	 * we need check the interrupt status which belongs to per port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR)
		mv_dprintk("port %d STP decoding error.\n",
		phy_no+mvi->id*mvi->chip->n_phy);

	/* phy-offline ("plug out") event: release tasks and defer the
	 * attach/detach decision to mvs_work_queue() */
	if (phy->irq_status & PHYEV_POOF) {
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (!ready)
				mv_dprintk("phy%d Unplug Notice\n",
					phy_no +
					mvi->id * mvi->chip->n_phy);
			/* a still-ready phy or a SATA device gets a reset;
			 * note the early return skips the status clear below */
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, 0);
				return;
			}
		}
	}

	/* COMWAKE: unmask signature-FIS interrupts and arm a 10 s timer
	 * that hard-resets the phy if the signature never arrives */
	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 10*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mvs_sig_remove_timer(phy);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			/* SATA: re-mask signature-FIS now that it arrived */
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			mvs_bytes_dmaed(mvi, phy_no);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("port %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		/* exception for Samsung disk drive*/
		mdelay(1000);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	}
	/* acknowledge (clear) the status bits we just handled */
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
2107
/*
 * Drain the RX (completion) ring: complete finished slots, free reset
 * slots, and optionally acknowledge attention conditions.
 * @self_clear: when true, attention entries trigger a full interrupt
 *              sweep via MVS_CHIP_DISP->int_full().
 * Always returns 0.
 */
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	/* consume entries until we catch up with the producer mirror */
	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		/* +1 skips the producer-mirror dword at rx[0] */
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			/* errored entry without DONE still needs completing */
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}
2154
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 000000000000..aa2270af1bac
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_SAS_H_
26#define _MV_SAS_H_
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/spinlock.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/dma-mapping.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/interrupt.h>
38#include <linux/irq.h>
39#include <linux/vmalloc.h>
40#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h>
42#include <scsi/sas_ata.h>
43#include <linux/version.h>
44#include "mv_defs.h"
45
46#define DRV_NAME "mvsas"
47#define DRV_VERSION "0.8.2"
48#define _MV_DUMP 0
49#define MVS_ID_NOT_MAPPED 0x7f
50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58 printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
71
72#define bit(n) ((u32)1 << n)
73
74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76 (__mc) != 0 ; \
77 (++__lseq), (__mc) >>= 1)
78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
92
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
171struct mvs_chip_info {
172 u32 n_host;
173 u32 n_phy;
174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
184
185struct mvs_err_info {
186 __le32 flags;
187 __le32 flags2;
188};
189
190struct mvs_cmd_hdr {
191 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
192 __le32 lens; /* cmd, max resp frame len */
193 __le32 tags; /* targ port xfer tag; tag */
194 __le32 data_len; /* data xfer len */
195 __le64 cmd_tbl; /* command table address */
196 __le64 open_frame; /* open addr frame address */
197 __le64 status_buf; /* status buffer address */
198 __le64 prd_tbl; /* PRD tbl address */
199 __le32 reserved[4];
200};
201
202struct mvs_port {
203 struct asd_sas_port sas_port;
204 u8 port_attached;
205 u8 wide_port_phymap;
206 struct list_head list;
207};
208
209struct mvs_phy {
210 struct mvs_info *mvi;
211 struct mvs_port *port;
212 struct asd_sas_phy sas_phy;
213 struct sas_identify identify;
214 struct scsi_device *sdev;
215 struct timer_list timer;
216 u64 dev_sas_addr;
217 u64 att_dev_sas_addr;
218 u32 att_dev_info;
219 u32 dev_info;
220 u32 phy_type;
221 u32 phy_status;
222 u32 irq_status;
223 u32 frame_rcvd_size;
224 u8 frame_rcvd[32];
225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
229 enum sas_linkrate minimum_linkrate;
230 enum sas_linkrate maximum_linkrate;
231};
232
/* Per-target state kept by the driver for each attached device. */
struct mvs_device {
	struct list_head dev_entry;
	enum sas_dev_type dev_type;
	struct mvs_info *mvi_info;	/* owning host instance */
	struct domain_device *sas_device;
	u32 attached_phy;
	u32 device_id;
	u32 runing_req;		/* in-flight request count (sic: spelling
				 * matches all users in mv_sas.c) */
	u8 taskfileset;		/* SATA register-set index (see
				 * SATA_RECEIVED_*_FIS macros above) */
	u8 dev_status;		/* enum dev_status: MVS_DEV_NORMAL / _EH */
	u16 reserved;
};
245
/* Per-command slot state, one per hardware delivery slot. */
struct mvs_slot_info {
	struct list_head entry;		/* linked on the owning port's list */
	union {
		struct sas_task *task;	/* libsas task bound to this slot */
		void *tdata;
	};
	u32 n_elem;
	u32 tx;				/* TX (delivery) ring index used */
	u32 slot_tag;

	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void *buf;
	dma_addr_t buf_dma;
#if _MV_DUMP
	u32 cmd_size;
#endif
	void *response;			/* completion status / error record;
					 * decoded by mvs_slot_err() and
					 * mvs_slot_complete() */
	struct mvs_port *port;
	struct mvs_device *device;
	void *open_frame;
};
269
270struct mvs_info {
271 unsigned long flags;
272
273 /* host-wide lock */
274 spinlock_t lock;
275
276 /* our device */
277 struct pci_dev *pdev;
278 struct device *dev;
279
280 /* enhanced mode registers */
281 void __iomem *regs;
282
283 /* peripheral or soc registers */
284 void __iomem *regs_ex;
285 u8 sas_addr[SAS_ADDR_SIZE];
286
287 /* SCSI/SAS glue */
288 struct sas_ha_struct *sas;
289 struct Scsi_Host *shost;
290
291 /* TX (delivery) DMA ring */
292 __le32 *tx;
293 dma_addr_t tx_dma;
294
295 /* cached next-producer idx */
296 u32 tx_prod;
297
298 /* RX (completion) DMA ring */
299 __le32 *rx;
300 dma_addr_t rx_dma;
301
302 /* RX consumer idx */
303 u32 rx_cons;
304
305 /* RX'd FIS area */
306 __le32 *rx_fis;
307 dma_addr_t rx_fis_dma;
308
309 /* DMA command header slots */
310 struct mvs_cmd_hdr *slot;
311 dma_addr_t slot_dma;
312
313 u32 chip_id;
314 const struct mvs_chip_info *chip;
315
316 int tags_num;
317 DECLARE_BITMAP(tags, MVS_SLOTS);
318 /* further per-slot information */
319 struct mvs_phy phy[MVS_MAX_PHYS];
320 struct mvs_port port[MVS_MAX_PHYS];
321 u32 irq;
322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
339#endif
340 struct mvs_slot_info slot_info[0];
341};
342
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
/* Deferred-event descriptor queued on mvi->wq_list; allocated by
 * mvs_handle_event() and freed by mvs_work_queue() after it runs. */
struct mvs_wq {
	struct delayed_work work_q;
	struct mvs_info *mvi;
	void *data;		/* handler-specific payload (e.g. phy id) */
	int handler;		/* event kind bitmask, e.g. PHY_PLUG_EVENT */
	struct list_head entry;
};
357
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
382int mvs_slave_configure(struct scsi_device *sdev);
383void mvs_scan_start(struct Scsi_Host *shost);
384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
385int mvs_queue_command(struct sas_task *task, const int num,
386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
397int mvs_I_T_nexus_reset(struct domain_device *dev);
398int mvs_query_task(struct sas_task *task);
399void mvs_release_task(struct mvs_info *mvi, int phy_no,
400 struct domain_device *dev);
401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
405#endif
406
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d16..5fd73d77c3af 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
11# it under the terms of the GNU General Public License version 2 11# it under the terms of the GNU General Public License version 2
12# 12#
13 13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
26# 2 - errors + warrnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
33# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library 14# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o 15libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o 16obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83ba..000000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5776b2ab6b12..7a117c18114c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
118 _osd_ver_desc(or)); 118 _osd_ver_desc(or));
119 119
120 pFirst = get_attrs[a++].val_ptr; 120 pFirst = get_attrs[a++].val_ptr;
121 OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", 121 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
122 (char *)pFirst); 122 (char *)pFirst);
123 123
124 pFirst = get_attrs[a++].val_ptr; 124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", 125 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
126 (char *)pFirst); 126 (char *)pFirst);
127 127
128 pFirst = get_attrs[a++].val_ptr; 128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", 129 OSD_INFO("PRODUCT_MODEL [%s]\n",
130 (char *)pFirst); 130 (char *)pFirst);
131 131
132 pFirst = get_attrs[a++].val_ptr; 132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", 133 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
134 pFirst ? get_unaligned_be32(pFirst) : ~0U); 134 pFirst ? get_unaligned_be32(pFirst) : ~0U);
135 135
136 pFirst = get_attrs[a++].val_ptr; 136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", 137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 138 (char *)pFirst);
139 139
140 pFirst = get_attrs[a].val_ptr; 140 pFirst = get_attrs[a].val_ptr;
141 OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); 141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
142 a++; 142 a++;
143 143
144 pFirst = get_attrs[a++].val_ptr; 144 pFirst = get_attrs[a++].val_ptr;
145 OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", 145 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
147 147
148 pFirst = get_attrs[a++].val_ptr; 148 pFirst = get_attrs[a++].val_ptr;
149 OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", 149 OSD_INFO("USED_CAPACITY [0x%llx]\n",
150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
151 151
152 pFirst = get_attrs[a++].val_ptr; 152 pFirst = get_attrs[a++].val_ptr;
153 OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", 153 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 155
156 if (a >= nelem) 156 if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
158 158
159 /* FIXME: Where are the time utilities */ 159 /* FIXME: Where are the time utilities */
160 pFirst = get_attrs[a++].val_ptr; 160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", 161 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
162 ((char *)pFirst)[0], ((char *)pFirst)[1], 162 ((char *)pFirst)[0], ((char *)pFirst)[1],
163 ((char *)pFirst)[2], ((char *)pFirst)[3], 163 ((char *)pFirst)[2], ((char *)pFirst)[3],
164 ((char *)pFirst)[4], ((char *)pFirst)[5]); 164 ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
169 169
170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, 170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
171 sid_dump, sizeof(sid_dump), true); 171 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); 172 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump);
173 a++; 174 a++;
174 } 175 }
175out: 176out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
669 __be16 action, const struct osd_obj_id *obj, osd_id initial_id, 670 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
670 struct osd_obj_id_list *list, unsigned nelem) 671 struct osd_obj_id_list *list, unsigned nelem)
671{ 672{
672 struct request_queue *q = or->osd_dev->scsi_device->request_queue; 673 struct request_queue *q = osd_request_queue(or->osd_dev);
673 u64 len = nelem * sizeof(osd_id) + sizeof(*list); 674 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
674 struct bio *bio; 675 struct bio *bio;
675 676
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
778*/ 779*/
779 780
780void osd_req_write(struct osd_request *or, 781void osd_req_write(struct osd_request *or,
781 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 782 const struct osd_obj_id *obj, u64 offset,
783 struct bio *bio, u64 len)
782{ 784{
783 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); 785 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
784 WARN_ON(or->out.bio || or->out.total_bytes); 786 WARN_ON(or->out.bio || or->out.total_bytes);
785 bio->bi_rw |= (1 << BIO_RW); 787 WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
786 or->out.bio = bio; 788 or->out.bio = bio;
787 or->out.total_bytes = bio->bi_size; 789 or->out.total_bytes = len;
788} 790}
789EXPORT_SYMBOL(osd_req_write); 791EXPORT_SYMBOL(osd_req_write);
790 792
793int osd_req_write_kern(struct osd_request *or,
794 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
795{
796 struct request_queue *req_q = osd_request_queue(or->osd_dev);
797 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
798
799 if (IS_ERR(bio))
800 return PTR_ERR(bio);
801
802 bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
803 osd_req_write(or, obj, offset, bio, len);
804 return 0;
805}
806EXPORT_SYMBOL(osd_req_write_kern);
807
791/*TODO: void osd_req_append(struct osd_request *, 808/*TODO: void osd_req_append(struct osd_request *,
792 const struct osd_obj_id *, struct bio *data_out); */ 809 const struct osd_obj_id *, struct bio *data_out); */
793/*TODO: void osd_req_create_write(struct osd_request *, 810/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
813EXPORT_SYMBOL(osd_req_flush_object); 830EXPORT_SYMBOL(osd_req_flush_object);
814 831
815void osd_req_read(struct osd_request *or, 832void osd_req_read(struct osd_request *or,
816 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 833 const struct osd_obj_id *obj, u64 offset,
834 struct bio *bio, u64 len)
817{ 835{
818 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); 836 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
819 WARN_ON(or->in.bio || or->in.total_bytes); 837 WARN_ON(or->in.bio || or->in.total_bytes);
820 bio->bi_rw &= ~(1 << BIO_RW); 838 WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
821 or->in.bio = bio; 839 or->in.bio = bio;
822 or->in.total_bytes = bio->bi_size; 840 or->in.total_bytes = len;
823} 841}
824EXPORT_SYMBOL(osd_req_read); 842EXPORT_SYMBOL(osd_req_read);
825 843
844int osd_req_read_kern(struct osd_request *or,
845 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
846{
847 struct request_queue *req_q = osd_request_queue(or->osd_dev);
848 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
849
850 if (IS_ERR(bio))
851 return PTR_ERR(bio);
852
853 osd_req_read(or, obj, offset, bio, len);
854 return 0;
855}
856EXPORT_SYMBOL(osd_req_read_kern);
857
826void osd_req_get_attributes(struct osd_request *or, 858void osd_req_get_attributes(struct osd_request *or,
827 const struct osd_obj_id *obj) 859 const struct osd_obj_id *obj)
828{ 860{
@@ -1213,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1213} 1245}
1214 1246
1215static int _osd_req_finalize_data_integrity(struct osd_request *or, 1247static int _osd_req_finalize_data_integrity(struct osd_request *or,
1216 bool has_in, bool has_out, const u8 *cap_key) 1248 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
1217{ 1249{
1218 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1250 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1219 int ret; 1251 int ret;
@@ -1228,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1228 }; 1260 };
1229 unsigned pad; 1261 unsigned pad;
1230 1262
1231 or->out_data_integ.data_bytes = cpu_to_be64( 1263 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1232 or->out.bio ? or->out.bio->bi_size : 0);
1233 or->out_data_integ.set_attributes_bytes = cpu_to_be64( 1264 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1234 or->set_attr.total_bytes); 1265 or->set_attr.total_bytes);
1235 or->out_data_integ.get_attributes_bytes = cpu_to_be64( 1266 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1306,6 +1337,8 @@ static int _init_blk_request(struct osd_request *or,
1306 1337
1307 or->request = req; 1338 or->request = req;
1308 req->cmd_type = REQ_TYPE_BLOCK_PC; 1339 req->cmd_type = REQ_TYPE_BLOCK_PC;
1340 req->cmd_flags |= REQ_QUIET;
1341
1309 req->timeout = or->timeout; 1342 req->timeout = or->timeout;
1310 req->retries = or->retries; 1343 req->retries = or->retries;
1311 req->sense = or->sense; 1344 req->sense = or->sense;
@@ -1339,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
1339{ 1372{
1340 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1373 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1341 bool has_in, has_out; 1374 bool has_in, has_out;
1375 u64 out_data_bytes = or->out.total_bytes;
1342 int ret; 1376 int ret;
1343 1377
1344 if (options & OSD_REQ_FUA) 1378 if (options & OSD_REQ_FUA)
@@ -1388,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
1388 } 1422 }
1389 } 1423 }
1390 1424
1391 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); 1425 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1426 out_data_bytes, cap_key);
1392 if (ret) 1427 if (ret)
1393 return ret; 1428 return ret;
1394 1429
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba83..0bdef3390902 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h> 55#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
175 176
176struct osd_dev *osduld_path_lookup(const char *name) 177struct osd_dev *osduld_path_lookup(const char *name)
177{ 178{
178 struct path path; 179 struct osd_uld_device *oud;
179 struct inode *inode; 180 struct osd_dev *od;
180 struct cdev *cdev; 181 struct file *file;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error; 182 int error;
183 183
184 if (!name || !*name) { 184 if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 186 return ERR_PTR(-EINVAL);
187 } 187 }
188 188
189 error = kern_path(name, LOOKUP_FOLLOW, &path); 189 od = kzalloc(sizeof(*od), GFP_KERNEL);
190 if (error) { 190 if (!od)
191 OSD_ERR("path_lookup of %s failed=>%d\n", name, error); 191 return ERR_PTR(-ENOMEM);
192 return ERR_PTR(error);
193 }
194 192
195 inode = path.dentry->d_inode; 193 file = filp_open(name, O_RDWR, 0);
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */ 194 if (IS_ERR(file)) {
197 if (!S_ISCHR(inode->i_mode)) { 195 error = PTR_ERR(file);
198 OSD_DEBUG("!S_ISCHR()\n"); 196 goto free_od;
199 goto out;
200 } 197 }
201 198
202 cdev = inode->i_cdev; 199 if (file->f_op != &osd_fops){
203 if (!cdev) { 200 error = -EINVAL;
204 OSD_ERR("Before mounting an OSD Based filesystem\n"); 201 goto close_file;
205 OSD_ERR(" user-mode must open+close the %s device\n", name);
206 OSD_ERR(" Example: bash: echo < %s\n", name);
207 goto out;
208 } 202 }
209 203
210 /* The Magic wand. Is it our char-dev */ 204 oud = file->private_data;
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", name);
214 goto out;
215 }
216 205
217 oud = container_of(cdev, struct osd_uld_device, cdev); 206 *od = oud->od;
207 od->file = file;
218 208
219 __uld_get(oud); 209 return od;
220 error = 0;
221 210
222out: 211close_file:
223 path_put(&path); 212 fput(file);
224 return error ? ERR_PTR(error) : &oud->od; 213free_od:
214 kfree(od);
215 return ERR_PTR(error);
225} 216}
226EXPORT_SYMBOL(osduld_path_lookup); 217EXPORT_SYMBOL(osduld_path_lookup);
227 218
228void osduld_put_device(struct osd_dev *od) 219void osduld_put_device(struct osd_dev *od)
229{ 220{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233 221
234 __uld_put(oud); 222 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data;
224
225 BUG_ON(od->scsi_device != oud->od.scsi_device);
226
227 fput(od->file);
228 kfree(od);
235 } 229 }
236} 230}
237EXPORT_SYMBOL(osduld_put_device); 231EXPORT_SYMBOL(osduld_put_device);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5eda..8371d917a9a2 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.26" 20#define QLA1280_VERSION "3.27"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup.
25 - Improve error recovery.
23 Rev 3.26, January 16, 2006 Jes Sorensen 26 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support 27 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig 28 Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *); 438 uint8_t, uint16_t *);
436static int qla1280_bus_reset(struct scsi_qla_host *, int); 439static int qla1280_bus_reset(struct scsi_qla_host *, int);
437static int qla1280_device_reset(struct scsi_qla_host *, int, int); 440static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 441static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440static int qla1280_abort_isp(struct scsi_qla_host *); 442static int qla1280_abort_isp(struct scsi_qla_host *);
441#ifdef QLA_64BIT_PTR 443#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
698} 700}
699 701
700/************************************************************************** 702/**************************************************************************
701 * qla1200_queuecommand 703 * qla1280_queuecommand
702 * Queue a command to the controller. 704 * Queue a command to the controller.
703 * 705 *
704 * Note: 706 * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
713{ 715{
714 struct Scsi_Host *host = cmd->device->host; 716 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 717 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp; 718 struct srb *sp = (struct srb *)CMD_SP(cmd);
717 int status; 719 int status;
718 720
719 cmd->scsi_done = fn; 721 cmd->scsi_done = fn;
720 sp->cmd = cmd; 722 sp->cmd = cmd;
721 sp->flags = 0; 723 sp->flags = 0;
724 sp->wait = NULL;
725 CMD_HANDLE(cmd) = (unsigned char *)NULL;
722 726
723 qla1280_print_scsi_cmd(5, cmd); 727 qla1280_print_scsi_cmd(5, cmd);
724 728
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
738 742
739enum action { 743enum action {
740 ABORT_COMMAND, 744 ABORT_COMMAND,
741 ABORT_DEVICE,
742 DEVICE_RESET, 745 DEVICE_RESET,
743 BUS_RESET, 746 BUS_RESET,
744 ADAPTER_RESET, 747 ADAPTER_RESET,
745 FAIL
746}; 748};
747 749
748/* timer action for error action processor */
749static void qla1280_error_wait_timeout(unsigned long __data)
750{
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
753
754 complete(sp->wait);
755}
756 750
757static void qla1280_mailbox_timeout(unsigned long __data) 751static void qla1280_mailbox_timeout(unsigned long __data)
758{ 752{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
767 complete(ha->mailbox_wait); 761 complete(ha->mailbox_wait);
768} 762}
769 763
764static int
765_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
766 struct completion *wait)
767{
768 int status = FAILED;
769 struct scsi_cmnd *cmd = sp->cmd;
770
771 spin_unlock_irq(ha->host->host_lock);
772 wait_for_completion_timeout(wait, 4*HZ);
773 spin_lock_irq(ha->host->host_lock);
774 sp->wait = NULL;
775 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
776 status = SUCCESS;
777 (*cmd->scsi_done)(cmd);
778 }
779 return status;
780}
781
782static int
783qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
784{
785 DECLARE_COMPLETION_ONSTACK(wait);
786
787 sp->wait = &wait;
788 return _qla1280_wait_for_single_command(ha, sp, &wait);
789}
790
791static int
792qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
793{
794 int cnt;
795 int status;
796 struct srb *sp;
797 struct scsi_cmnd *cmd;
798
799 status = SUCCESS;
800
801 /*
802 * Wait for all commands with the designated bus/target
803 * to be completed by the firmware
804 */
805 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
806 sp = ha->outstanding_cmds[cnt];
807 if (sp) {
808 cmd = sp->cmd;
809
810 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
811 continue;
812 if (target >= 0 && SCSI_TCN_32(cmd) != target)
813 continue;
814
815 status = qla1280_wait_for_single_command(ha, sp);
816 if (status == FAILED)
817 break;
818 }
819 }
820 return status;
821}
822
770/************************************************************************** 823/**************************************************************************
771 * qla1200_error_action 824 * qla1280_error_action
772 * The function will attempt to perform a specified error action and 825 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out). 826 * wait for the results (or time out).
774 * 827 *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
780 * Returns: 833 * Returns:
781 * SUCCESS or FAILED 834 * SUCCESS or FAILED
782 * 835 *
783 * Note:
784 * Resetting the bus always succeeds - is has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/ 836 **************************************************************************/
789static int 837static int
790qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 838qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha; 840 struct scsi_qla_host *ha;
793 int bus, target, lun; 841 int bus, target, lun;
794 struct srb *sp; 842 struct srb *sp;
795 uint16_t data; 843 int i, found;
796 unsigned char *handle; 844 int result=FAILED;
797 int result, i; 845 int wait_for_bus=-1;
846 int wait_for_target = -1;
798 DECLARE_COMPLETION_ONSTACK(wait); 847 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer; 848
849 ENTER("qla1280_error_action");
800 850
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 851 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
852 sp = (struct srb *)CMD_SP(cmd);
853 bus = SCSI_BUS_32(cmd);
854 target = SCSI_TCN_32(cmd);
855 lun = SCSI_LUN_32(cmd);
802 856
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 857 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus)); 858 RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
807 RD_REG_WORD(&ha->iobase->host_cmd), 861 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 862 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
809 863
810 ENTER("qla1280_error_action");
811 if (qla1280_verbose) 864 if (qla1280_verbose)
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 865 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n", 866 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action); 867 ha->host_no, cmd, CMD_HANDLE(cmd), action);
815 868
816 if (cmd == NULL) {
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
820 return FAILED;
821 }
822
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
826
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
829 /*
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
833 */
834 if (data & RISC_INT)
835 qla1280_isr(ha, &ha->done_q);
836
837 /* 869 /*
838 * Determine the suggested action that the mid-level driver wants 870 * Check to see if we have the command in the outstanding_cmds[]
839 * us to perform. 871 * array. If not then it must have completed before this error
872 * action was initiated. If the error_action isn't ABORT_COMMAND
873 * then the driver must proceed with the requested action.
840 */ 874 */
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { 875 found = -1;
842 if(action == ABORT_COMMAND) { 876 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
843 /* we never got this command */ 877 if (sp == ha->outstanding_cmds[i]) {
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); 878 found = i;
845 return SUCCESS; /* no action - we don't have command */ 879 sp->wait = &wait; /* we'll wait for it to complete */
880 break;
846 } 881 }
847 } else {
848 sp->wait = &wait;
849 } 882 }
850 883
851 bus = SCSI_BUS_32(cmd); 884 if (found < 0) { /* driver doesn't have command */
852 target = SCSI_TCN_32(cmd); 885 result = SUCCESS;
853 lun = SCSI_LUN_32(cmd); 886 if (qla1280_verbose) {
887 printk(KERN_INFO
888 "scsi(%ld:%d:%d:%d): specified command has "
889 "already completed.\n", ha->host_no, bus,
890 target, lun);
891 }
892 }
854 893
855 /* Overloading result. Here it means the success or fail of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or fail of the action */
858 result = FAILED;
859 switch (action) { 894 switch (action) {
860 case FAIL:
861 break;
862 895
863 case ABORT_COMMAND: 896 case ABORT_COMMAND:
864 if ((sp->flags & SRB_ABORT_PENDING)) { 897 dprintk(1, "qla1280: RISC aborting command\n");
865 printk(KERN_WARNING 898 /*
866 "scsi(): Command has a pending abort " 899 * The abort might fail due to race when the host_lock
867 "message - ABORT_PENDING.\n"); 900 * is released to issue the abort. As such, we
868 /* This should technically be impossible since we 901 * don't bother to check the return status.
869 * now wait for abort completion */ 902 */
870 break; 903 if (found >= 0)
871 } 904 qla1280_abort_command(ha, sp, found);
872
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
877 result = SUCCESS;
878 else {
879 /*
880 * Since we don't know what might
881 * have happend to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
886 */
887 printk(KERN_WARNING
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
891 }
892 }
893 }
894 break;
895
896 case ABORT_DEVICE:
897 if (qla1280_verbose)
898 printk(KERN_INFO
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
902 result = SUCCESS;
903 break; 905 break;
904 906
905 case DEVICE_RESET: 907 case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
907 printk(KERN_INFO 909 printk(KERN_INFO
908 "scsi(%ld:%d:%d:%d): Queueing device reset " 910 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun); 911 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0) 912 if (qla1280_device_reset(ha, bus, target) == 0) {
911 result = SUCCESS; 913 /* issued device reset, set wait conditions */
914 wait_for_bus = bus;
915 wait_for_target = target;
916 }
912 break; 917 break;
913 918
914 case BUS_RESET: 919 case BUS_RESET:
915 if (qla1280_verbose) 920 if (qla1280_verbose)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 921 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus); 922 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0) 923 if (qla1280_bus_reset(ha, bus) == 0) {
919 result = SUCCESS; 924 /* issued bus reset, set wait conditions */
925 wait_for_bus = bus;
926 }
920 break; 927 break;
921 928
922 case ADAPTER_RESET: 929 case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
929 "continue automatically\n", ha->host_no); 936 "continue automatically\n", ha->host_no);
930 } 937 }
931 ha->flags.reset_active = 1; 938 ha->flags.reset_active = 1;
932 /* 939
933 * We restarted all of the commands automatically, so the 940 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
934 * mid-level code can expect completions momentitarily. 941 result = FAILED;
935 */ 942 }
936 if (qla1280_abort_isp(ha) == 0)
937 result = SUCCESS;
938 943
939 ha->flags.reset_active = 0; 944 ha->flags.reset_active = 0;
940 } 945 }
941 946
942 if (!list_empty(&ha->done_q)) 947 /*
943 qla1280_done(ha); 948 * At this point, the host_lock has been released and retaken
944 949 * by the issuance of the mailbox command.
945 /* If we didn't manage to issue the action, or we have no 950 * Wait for the command passed in by the mid-layer if it
946 * command to wait for, exit here */ 951 * was found by the driver. It might have been returned
947 if (result == FAILED || handle == NULL || 952 * between eh recovery steps, hence the check of the "found"
948 handle == (unsigned char *)INVALID_HANDLE) { 953 * variable.
949 /* 954 */
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
953 */
954 sp->wait = NULL;
955 goto leave;
956 }
957 955
958 /* set up a timer just in case we're really jammed */ 956 if (found >= 0)
959 init_timer(&timer); 957 result = _qla1280_wait_for_single_command(ha, sp, &wait);
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
963 add_timer(&timer);
964 958
965 /* wait for the action to complete (or the timer to expire) */ 959 if (action == ABORT_COMMAND && result != SUCCESS) {
966 spin_unlock_irq(ha->host->host_lock); 960 printk(KERN_WARNING
967 wait_for_completion(&wait); 961 "scsi(%li:%i:%i:%i): "
968 del_timer_sync(&timer); 962 "Unable to abort command!\n",
969 spin_lock_irq(ha->host->host_lock); 963 ha->host_no, bus, target, lun);
970 sp->wait = NULL; 964 }
971 965
972 /* the only action we might get a fail for is abort */ 966 /*
973 if (action == ABORT_COMMAND) { 967 * If the command passed in by the mid-layer has been
974 if(sp->flags & SRB_ABORTED) 968 * returned by the board, then wait for any additional
975 result = SUCCESS; 969 * commands which are supposed to complete based upon
976 else 970 * the error action.
977 result = FAILED; 971 *
972 * All commands are unconditionally returned during a
973 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
974 * to wait for them.
975 */
976 if (result == SUCCESS && wait_for_bus >= 0) {
977 result = qla1280_wait_for_pending_commands(ha,
978 wait_for_bus, wait_for_target);
978 } 979 }
979 980
980 leave:
981 dprintk(1, "RESET returning %d\n", result); 981 dprintk(1, "RESET returning %d\n", result);
982 982
983 LEAVE("qla1280_error_action"); 983 LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
1280 switch ((CMD_RESULT(cmd) >> 16)) { 1280 switch ((CMD_RESULT(cmd) >> 16)) {
1281 case DID_RESET: 1281 case DID_RESET:
1282 /* Issue marker command. */ 1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1283 if (!ha->flags.abort_isp_active)
1284 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1284 break; 1285 break;
1285 case DID_ABORT: 1286 case DID_ABORT:
1286 sp->flags &= ~SRB_ABORT_PENDING; 1287 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED; 1288 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1290 break; 1289 break;
1291 default: 1290 default:
1292 break; 1291 break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
1296 scsi_dma_unmap(cmd); 1295 scsi_dma_unmap(cmd);
1297 1296
1298 /* Call the mid-level driver interrupt handler */ 1297 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1300 ha->actthreads--; 1298 ha->actthreads--;
1301 1299
1302 (*(cmd)->scsi_done)(cmd); 1300 if (sp->wait == NULL)
1303 1301 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL) 1302 else
1305 complete(sp->wait); 1303 complete(sp->wait);
1306 } 1304 }
1307 LEAVE("qla1280_done"); 1305 LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
2417qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2415qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2418{ 2416{
2419 struct device_reg __iomem *reg = ha->iobase; 2417 struct device_reg __iomem *reg = ha->iobase;
2420#if 0
2421 LIST_HEAD(done_q);
2422#endif
2423 int status = 0; 2418 int status = 0;
2424 int cnt; 2419 int cnt;
2425 uint16_t *optr, *iptr; 2420 uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2493 mr = MAILBOX_REGISTER_COUNT; 2488 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2489 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2495 2490
2496#if 0
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2499#endif
2500
2501 if (ha->flags.reset_marker) 2491 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha); 2492 qla1280_rst_aen(ha);
2503 2493
2504#if 0
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2507#endif
2508
2509 if (status) 2494 if (status)
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2495 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]); 2496 "0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2641} 2626}
2642 2627
2643/* 2628/*
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2646 *
2647 * Input:
2648 * ha = adapter block pointer.
2649 * bus = SCSI BUS.
2650 * target = SCSI ID.
2651 * lun = SCSI LUN.
2652 *
2653 * Returns:
2654 * 0 = success
2655 */
2656static int
2657qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2658{
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2660 int status;
2661
2662 ENTER("qla1280_abort_device");
2663
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2667
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2670
2671 if (status)
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2673
2674 LEAVE("qla1280_abort_device");
2675 return status;
2676}
2677
2678/*
2679 * qla1280_abort_command 2629 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB. 2630 * Abort command aborts a specified IOCB.
2681 * 2631 *
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2833 2783
2834 /* If room for request in request ring. */ 2784 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) { 2785 if ((req_cnt + 2) >= ha->req_q_cnt) {
2836 status = 1; 2786 status = SCSI_MLQUEUE_HOST_BUSY;
2837 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2787 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2788 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2839 req_cnt); 2789 req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 ha->outstanding_cmds[cnt] != NULL; cnt++); 2795 ha->outstanding_cmds[cnt] != NULL; cnt++);
2846 2796
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2797 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2848 status = 1; 2798 status = SCSI_MLQUEUE_HOST_BUSY;
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2799 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2800 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2851 goto out; 2801 goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3108 ha->req_q_cnt, seg_cnt); 3058 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */ 3059 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) { 3060 if ((req_cnt + 2) >= ha->req_q_cnt) {
3111 status = 1; 3061 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3062 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3063 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt); 3064 ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3070 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3121 3071
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3072 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3123 status = 1; 3073 status = SCSI_MLQUEUE_HOST_BUSY;
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3074 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3075 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3126 goto out; 3076 goto out;
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3487 3437
3488 /* Save ISP completion status */ 3438 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0; 3439 CMD_RESULT(sp->cmd) = 0;
3440 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3490 3441
3491 /* Place block on done queue */ 3442 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q); 3443 list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3495 * If we get here we have a real problem! 3446 * If we get here we have a real problem!
3496 */ 3447 */
3497 printk(KERN_WARNING 3448 printk(KERN_WARNING
3498 "qla1280: ISP invalid handle"); 3449 "qla1280: ISP invalid handle\n");
3499 } 3450 }
3500 } 3451 }
3501 break; 3452 break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3753 } 3704 }
3754 } 3705 }
3755 3706
3707 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3708
3756 /* Place command on done queue. */ 3709 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q); 3710 list_add_tail(&sp->list, done_q);
3758 out: 3711 out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3761 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } 3762 }
3810 3763
3764 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3765
3811 /* Place command on done queue. */ 3766 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q); 3767 list_add_tail(&sp->list, done_q);
3813 } 3768 }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
3858 struct scsi_cmnd *cmd; 3813 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt]; 3814 sp = ha->outstanding_cmds[cnt];
3860 if (sp) { 3815 if (sp) {
3861
3862 cmd = sp->cmd; 3816 cmd = sp->cmd;
3863 CMD_RESULT(cmd) = DID_RESET << 16; 3817 CMD_RESULT(cmd) = DID_RESET << 16;
3864 3818 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3865 sp->cmd = NULL;
3866 ha->outstanding_cmds[cnt] = NULL; 3819 ha->outstanding_cmds[cnt] = NULL;
3867 3820 list_add_tail(&sp->list, &ha->done_q);
3868 (*cmd->scsi_done)(cmd);
3869
3870 sp->flags = 0;
3871 } 3821 }
3872 } 3822 }
3873 3823
3824 qla1280_done(ha);
3825
3874 status = qla1280_load_firmware(ha); 3826 status = qla1280_load_firmware(ha);
3875 if (status) 3827 if (status)
3876 goto out; 3828 goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3955 3907
3956 if (scsi_control == SCSI_PHASE_INVALID) { 3908 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1; 3909 ha->bus_settings[bus].scsi_bus_dead = 1;
3958#if 0
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3962
3963 (*(cp)->scsi_done)(cp);
3964#endif
3965 return 1; /* bus is dead */ 3910 return 1; /* bus is dead */
3966 } else { 3911 } else {
3967 ha->bus_settings[bus].scsi_bus_dead = 0; 3912 ha->bus_settings[bus].scsi_bus_dead = 0;
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4f..834884b9eed5 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
88 88
89/* Maximum outstanding commands in ISP queues */ 89/* Maximum outstanding commands in ISP queues */
90#define MAX_OUTSTANDING_COMMANDS 512 90#define MAX_OUTSTANDING_COMMANDS 512
91#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) 91#define COMPLETED_HANDLE ((unsigned char *) \
92 (MAX_OUTSTANDING_COMMANDS + 2))
92 93
93/* ISP request and response entry counts (37-65535) */ 94/* ISP request and response entry counts (37-65535) */
94#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ 95#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a06576..0f8796201504 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
97 return 0; 97 return 0;
98 98
99 if (IS_NOCACHE_VPD_TYPE(ha)) 99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, 100 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
101 ha->nvram_size); 101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram, 102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size); 103 ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
692 .read = qla2x00_sysfs_read_edc_status, 692 .read = qla2x00_sysfs_read_edc_status,
693}; 693};
694 694
695static ssize_t
696qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
697 struct bin_attribute *bin_attr,
698 char *buf, loff_t off, size_t count)
699{
700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
701 struct device, kobj)));
702 struct qla_hw_data *ha = vha->hw;
703 int rval;
704 uint16_t actual_size;
705
706 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
707 return 0;
708
709 if (ha->xgmac_data)
710 goto do_read;
711
712 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
713 &ha->xgmac_data_dma, GFP_KERNEL);
714 if (!ha->xgmac_data) {
715 qla_printk(KERN_WARNING, ha,
716 "Unable to allocate memory for XGMAC read-data.\n");
717 return 0;
718 }
719
720do_read:
721 actual_size = 0;
722 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
723
724 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
725 XGMAC_DATA_SIZE, &actual_size);
726 if (rval != QLA_SUCCESS) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to read XGMAC data (%x).\n", rval);
729 count = 0;
730 }
731
732 count = actual_size > count ? count: actual_size;
733 memcpy(buf, ha->xgmac_data, count);
734
735 return count;
736}
737
738static struct bin_attribute sysfs_xgmac_stats_attr = {
739 .attr = {
740 .name = "xgmac_stats",
741 .mode = S_IRUSR,
742 },
743 .size = 0,
744 .read = qla2x00_sysfs_read_xgmac_stats,
745};
746
747static ssize_t
748qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
749 struct bin_attribute *bin_attr,
750 char *buf, loff_t off, size_t count)
751{
752 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
753 struct device, kobj)));
754 struct qla_hw_data *ha = vha->hw;
755 int rval;
756 uint16_t actual_size;
757
758 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
759 return 0;
760
761 if (ha->dcbx_tlv)
762 goto do_read;
763
764 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
765 &ha->dcbx_tlv_dma, GFP_KERNEL);
766 if (!ha->dcbx_tlv) {
767 qla_printk(KERN_WARNING, ha,
768 "Unable to allocate memory for DCBX TLV read-data.\n");
769 return 0;
770 }
771
772do_read:
773 actual_size = 0;
774 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
775
776 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
777 DCBX_TLV_DATA_SIZE);
778 if (rval != QLA_SUCCESS) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to read DCBX TLV data (%x).\n", rval);
781 count = 0;
782 }
783
784 memcpy(buf, ha->dcbx_tlv, count);
785
786 return count;
787}
788
789static struct bin_attribute sysfs_dcbx_tlv_attr = {
790 .attr = {
791 .name = "dcbx_tlv",
792 .mode = S_IRUSR,
793 },
794 .size = 0,
795 .read = qla2x00_sysfs_read_dcbx_tlv,
796};
797
695static struct sysfs_entry { 798static struct sysfs_entry {
696 char *name; 799 char *name;
697 struct bin_attribute *attr; 800 struct bin_attribute *attr;
@@ -706,6 +809,8 @@ static struct sysfs_entry {
706 { "reset", &sysfs_reset_attr, }, 809 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 }, 810 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 }, 811 { "edc_status", &sysfs_edc_status_attr, 2 },
812 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
813 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
709 { NULL }, 814 { NULL },
710}; 815};
711 816
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
721 continue; 826 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 827 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue; 828 continue;
829 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
830 continue;
724 831
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 832 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr); 833 iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
743 continue; 850 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 851 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue; 852 continue;
853 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
854 continue;
746 855
747 sysfs_remove_bin_file(&host->shost_gendev.kobj, 856 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr); 857 iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1197 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089} 1198}
1090 1199
1200static ssize_t
1201qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1202 char *buf)
1203{
1204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205
1206 if (!IS_QLA81XX(vha->hw))
1207 return snprintf(buf, PAGE_SIZE, "\n");
1208
1209 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1210}
1211
1212static ssize_t
1213qla2x00_vn_port_mac_address_show(struct device *dev,
1214 struct device_attribute *attr, char *buf)
1215{
1216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1217
1218 if (!IS_QLA81XX(vha->hw))
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1222 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1223 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1224 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1225}
1226
1227static ssize_t
1228qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1229 char *buf)
1230{
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232
1233 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1234}
1235
1236static ssize_t
1237qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf)
1239{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval;
1242 uint16_t state[5];
1243
1244 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state));
1247
1248 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1249 state[1], state[2], state[3], state[4]);
1250}
1251
1091static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1252static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1253static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1254static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1277static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1278static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL); 1279 NULL);
1280static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1281static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1282 qla2x00_vn_port_mac_address_show, NULL);
1283static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1284static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1119 1285
1120struct device_attribute *qla2x00_host_attrs[] = { 1286struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version, 1287 &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
1138 &dev_attr_mpi_version, 1304 &dev_attr_mpi_version,
1139 &dev_attr_phy_version, 1305 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size, 1306 &dev_attr_flash_block_size,
1307 &dev_attr_vlan_id,
1308 &dev_attr_vn_port_mac_address,
1309 &dev_attr_fabric_param,
1310 &dev_attr_fw_state,
1141 NULL, 1311 NULL,
1142}; 1312};
1143 1313
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1313 * At this point all fcport's software-states are cleared. Perform any 1483 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs). 1484 * final cleanup of firmware resources (PCBs and XCBs).
1315 */ 1485 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID) 1486 if (fcport->loop_id != FC_NO_LOOP_ID &&
1487 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain, 1489 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1490 fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
1437qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1608qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438{ 1609{
1439 int ret = 0; 1610 int ret = 0;
1440 int cnt = 0; 1611 uint8_t qos = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 1612 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL; 1613 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw; 1614 struct qla_hw_data *ha = base_vha->hw;
1615 uint16_t options = 0;
1616 int cnt;
1617 struct req_que *req = ha->req_q_map[0];
1445 1618
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1619 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) { 1620 if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1497 1670
1498 qla24xx_vport_disable(fc_vport, disable); 1671 qla24xx_vport_disable(fc_vport, disable);
1499 1672
1500 /* Create a queue pair for the vport */ 1673 if (ql2xmultique_tag) {
1501 if (ha->mqenable) { 1674 req = ha->req_q_map[1];
1502 if (ha->npiv_info) { 1675 goto vport_queue;
1503 for (; cnt < ha->nvram_npiv_size; cnt++) { 1676 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1504 if (ha->npiv_info[cnt].port_name == 1677 goto vport_queue;
1505 vha->port_name && 1678 /* Create a request queue in QoS mode for the vport */
1506 ha->npiv_info[cnt].node_name == 1679 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1507 vha->node_name) { 1680 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1508 qos = ha->npiv_info[cnt].q_qos; 1681 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1509 break; 1682 8) == 0) {
1510 } 1683 qos = ha->npiv_info[cnt].q_qos;
1511 } 1684 break;
1685 }
1686 }
1687 if (qos) {
1688 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1689 qos);
1690 if (!ret)
1691 qla_printk(KERN_WARNING, ha,
1692 "Can't create request queue for vp_idx:%d\n",
1693 vha->vp_idx);
1694 else {
1695 DEBUG2(qla_printk(KERN_INFO, ha,
1696 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1697 ret, qos, vha->vp_idx));
1698 req = ha->req_q_map[ret];
1512 } 1699 }
1513 qla25xx_create_queues(vha, qos);
1514 } 1700 }
1515 1701
1702vport_queue:
1703 vha->req = req;
1516 return 0; 1704 return 0;
1705
1517vport_create_failed_2: 1706vport_create_failed_2:
1518 qla24xx_disable_vp(vha); 1707 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha); 1708 qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1554 vha->host_no, vha->vp_idx, vha)); 1743 vha->host_no, vha->vp_idx, vha));
1555 } 1744 }
1556 1745
1557 if (ha->mqenable) { 1746 if (vha->req->id && !ql2xmultique_tag) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) 1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha, 1748 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n"); 1749 "Queue delete failed.\n");
1561 } 1750 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f17..4a990f4da4ea 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS; 149 int rval = QLA_SUCCESS;
150 uint32_t cnt; 150 uint32_t cnt;
151 151
152 if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
153 return rval;
154
155 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); 152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
156 for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && 153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
157 rval == QLA_SUCCESS; cnt--) { 155 rval == QLA_SUCCESS; cnt--) {
158 if (cnt) 156 if (cnt)
159 udelay(100); 157 udelay(100);
@@ -351,7 +349,7 @@ static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{ 350{
353 uint32_t cnt, que_idx; 351 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt; 352 uint8_t que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr; 353 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg; 354 struct device_reg_25xxmq __iomem *reg;
357 355
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365 363
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 365 ha->max_req_queues : ha->max_rsp_queues;
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt); 366 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) { 367 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *) 368 reg = (struct device_reg_25xxmq *) ((void *)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e1..00aa48d975a6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
93#define LSD(x) ((uint32_t)((uint64_t)(x))) 93#define LSD(x) ((uint32_t)((uint64_t)(x)))
94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
95 95
96#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
96 97
97/* 98/*
98 * I/O register 99 * I/O register
@@ -179,6 +180,7 @@
179#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 180#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
180#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
181#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
182 184
183struct req_que; 185struct req_que;
184 186
@@ -186,7 +188,6 @@ struct req_que;
186 * SCSI Request Block 188 * SCSI Request Block
187 */ 189 */
188typedef struct srb { 190typedef struct srb {
189 struct req_que *que;
190 struct fc_port *fcport; 191 struct fc_port *fcport;
191 192
192 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
2008#define VP_RET_CODE_NOT_FOUND 6 2009#define VP_RET_CODE_NOT_FOUND 6
2009 2010
2010struct qla_hw_data; 2011struct qla_hw_data;
2011 2012struct rsp_que;
2012/* 2013/*
2013 * ISP operations 2014 * ISP operations
2014 */ 2015 */
@@ -2030,10 +2031,9 @@ struct isp_operations {
2030 void (*enable_intrs) (struct qla_hw_data *); 2031 void (*enable_intrs) (struct qla_hw_data *);
2031 void (*disable_intrs) (struct qla_hw_data *); 2032 void (*disable_intrs) (struct qla_hw_data *);
2032 2033
2033 int (*abort_command) (struct scsi_qla_host *, srb_t *, 2034 int (*abort_command) (srb_t *);
2034 struct req_que *); 2035 int (*target_reset) (struct fc_port *, unsigned int, int);
2035 int (*target_reset) (struct fc_port *, unsigned int); 2036 int (*lun_reset) (struct fc_port *, unsigned int, int);
2036 int (*lun_reset) (struct fc_port *, unsigned int);
2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
2038 uint8_t, uint8_t, uint16_t *, uint8_t); 2038 uint8_t, uint8_t, uint16_t *, uint8_t);
2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, 2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
2079#define QLA_PCI_MSIX_CONTROL 0xa2 2079#define QLA_PCI_MSIX_CONTROL 0xa2
2080 2080
2081struct scsi_qla_host; 2081struct scsi_qla_host;
2082struct rsp_que;
2083 2082
2084struct qla_msix_entry { 2083struct qla_msix_entry {
2085 int have_irq; 2084 int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
2140#define MBC_INITIALIZE_MULTIQ 0x1f 2139#define MBC_INITIALIZE_MULTIQ 0x1f
2141#define QLA_QUE_PAGE 0X1000 2140#define QLA_QUE_PAGE 0X1000
2142#define QLA_MQ_SIZE 32 2141#define QLA_MQ_SIZE 32
2143#define QLA_MAX_HOST_QUES 16
2144#define QLA_MAX_QUEUES 256 2142#define QLA_MAX_QUEUES 256
2145#define ISP_QUE_REG(ha, id) \ 2143#define ISP_QUE_REG(ha, id) \
2146 ((ha->mqenable) ? \ 2144 ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
2170 struct qla_hw_data *hw; 2168 struct qla_hw_data *hw;
2171 struct qla_msix_entry *msix; 2169 struct qla_msix_entry *msix;
2172 struct req_que *req; 2170 struct req_que *req;
2171 srb_t *status_srb; /* status continuation entry */
2172 struct work_struct q_work;
2173}; 2173};
2174 2174
2175/* Request queue data structure */ 2175/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
2222 uint32_t fce_enabled :1; 2222 uint32_t fce_enabled :1;
2223 uint32_t fac_supported :1; 2223 uint32_t fac_supported :1;
2224 uint32_t chip_reset_done :1; 2224 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1;
2225 } flags; 2227 } flags;
2226 2228
2227 /* This spinlock is used to protect "io transactions", you must 2229 /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
2246 struct rsp_que **rsp_q_map; 2248 struct rsp_que **rsp_q_map;
2247 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2249 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2248 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2250 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2249 uint16_t max_queues; 2251 uint8_t max_req_queues;
2252 uint8_t max_rsp_queues;
2250 struct qla_npiv_entry *npiv_info; 2253 struct qla_npiv_entry *npiv_info;
2251 uint16_t nvram_npiv_size; 2254 uint16_t nvram_npiv_size;
2252 2255
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
2255#define FLOGI_MID_SUPPORT BIT_10 2258#define FLOGI_MID_SUPPORT BIT_10
2256#define FLOGI_VSAN_SUPPORT BIT_12 2259#define FLOGI_VSAN_SUPPORT BIT_12
2257#define FLOGI_SP_SUPPORT BIT_13 2260#define FLOGI_SP_SUPPORT BIT_13
2261
2262 uint8_t port_no; /* Physical port of adapter */
2263
2258 /* Timeout timers. */ 2264 /* Timeout timers. */
2259 uint8_t loop_down_abort_time; /* port down timer */ 2265 uint8_t loop_down_abort_time; /* port down timer */
2260 atomic_t loop_down_timer; /* loop down timer */ 2266 atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
2392 dma_addr_t edc_data_dma; 2398 dma_addr_t edc_data_dma;
2393 uint16_t edc_data_len; 2399 uint16_t edc_data_len;
2394 2400
2401#define XGMAC_DATA_SIZE PAGE_SIZE
2402 void *xgmac_data;
2403 dma_addr_t xgmac_data_dma;
2404
2405#define DCBX_TLV_DATA_SIZE PAGE_SIZE
2406 void *dcbx_tlv;
2407 dma_addr_t dcbx_tlv_dma;
2408
2395 struct task_struct *dpc_thread; 2409 struct task_struct *dpc_thread;
2396 uint8_t dpc_active; /* DPC routine is active */ 2410 uint8_t dpc_active; /* DPC routine is active */
2397 2411
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
2510 uint32_t flt_region_vpd; 2524 uint32_t flt_region_vpd;
2511 uint32_t flt_region_nvram; 2525 uint32_t flt_region_nvram;
2512 uint32_t flt_region_npiv_conf; 2526 uint32_t flt_region_npiv_conf;
2527 uint32_t flt_region_gold_fw;
2513 2528
2514 /* Needed for BEACON */ 2529 /* Needed for BEACON */
2515 uint16_t beacon_blink_led; 2530 uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
2536 struct qla_chip_state_84xx *cs84xx; 2551 struct qla_chip_state_84xx *cs84xx;
2537 struct qla_statistics qla_stats; 2552 struct qla_statistics qla_stats;
2538 struct isp_operations *isp_ops; 2553 struct isp_operations *isp_ops;
2554 struct workqueue_struct *wq;
2539}; 2555};
2540 2556
2541/* 2557/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
2545 struct list_head list; 2561 struct list_head list;
2546 struct list_head vp_fcports; /* list of fcports */ 2562 struct list_head vp_fcports; /* list of fcports */
2547 struct list_head work_list; 2563 struct list_head work_list;
2564 spinlock_t work_lock;
2565
2548 /* Commonly used flags and state information. */ 2566 /* Commonly used flags and state information. */
2549 struct Scsi_Host *host; 2567 struct Scsi_Host *host;
2550 unsigned long host_no; 2568 unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
2591#define SWITCH_FOUND BIT_0 2609#define SWITCH_FOUND BIT_0
2592#define DFLG_NO_CABLE BIT_1 2610#define DFLG_NO_CABLE BIT_1
2593 2611
2594 srb_t *status_srb; /* Status continuation entry. */
2595
2596 /* ISP configuration data. */ 2612 /* ISP configuration data. */
2597 uint16_t loop_id; /* Host adapter loop id */ 2613 uint16_t loop_id; /* Host adapter loop id */
2598 2614
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
2618 uint8_t node_name[WWN_SIZE]; 2634 uint8_t node_name[WWN_SIZE];
2619 uint8_t port_name[WWN_SIZE]; 2635 uint8_t port_name[WWN_SIZE];
2620 uint8_t fabric_node_name[WWN_SIZE]; 2636 uint8_t fabric_node_name[WWN_SIZE];
2637
2638 uint16_t fcoe_vlan_id;
2639 uint16_t fcoe_fcf_idx;
2640 uint8_t fcoe_vn_port_mac[6];
2641
2621 uint32_t vp_abort_cnt; 2642 uint32_t vp_abort_cnt;
2622 2643
2623 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2644 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
2643#define VP_ERR_FAB_LOGOUT 4 2664#define VP_ERR_FAB_LOGOUT 4
2644#define VP_ERR_ADAP_NORESOURCES 5 2665#define VP_ERR_ADAP_NORESOURCES 5
2645 struct qla_hw_data *hw; 2666 struct qla_hw_data *hw;
2646 int req_ques[QLA_MAX_HOST_QUES]; 2667 struct req_que *req;
2647} scsi_qla_host_t; 2668} scsi_qla_host_t;
2648 2669
2649/* 2670/*
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba0..dfde2dd865cb 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
878 /* HCCR statuses. */ 878 /* HCCR statuses. */
879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ 879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ 880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
881#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
882 /* HCCR commands. */ 881 /* HCCR commands. */
883 /* NOOP. */ 882 /* NOOP. */
884#define HCCRX_NOOP 0x00000000 883#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
1241#define FLT_REG_HW_EVENT_1 0x1f 1240#define FLT_REG_HW_EVENT_1 0x1f
1242#define FLT_REG_NPIV_CONF_0 0x29 1241#define FLT_REG_NPIV_CONF_0 0x29
1243#define FLT_REG_NPIV_CONF_1 0x2a 1242#define FLT_REG_NPIV_CONF_1 0x2a
1243#define FLT_REG_GOLD_FW 0x2f
1244 1244
1245struct qla_flt_region { 1245struct qla_flt_region {
1246 uint32_t code; 1246 uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
1405#define MBC_IDC_ACK 0x101 1405#define MBC_IDC_ACK 0x101
1406#define MBC_RESTART_MPI_FW 0x3d 1406#define MBC_RESTART_MPI_FW 0x3d
1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ 1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
1408#define MBC_GET_XGMAC_STATS 0x7a
1409#define MBC_GET_DCBX_PARAMS 0x51
1408 1410
1409/* Flash access control option field bit definitions */ 1411/* Flash access control option field bit definitions */
1410#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1412#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
1711#define FA_VPD0_ADDR_81 0xD0000 1713#define FA_VPD0_ADDR_81 0xD0000
1712#define FA_VPD1_ADDR_81 0xD0400 1714#define FA_VPD1_ADDR_81 0xD0400
1713#define FA_NVRAM0_ADDR_81 0xD0080 1715#define FA_NVRAM0_ADDR_81 0xD0080
1714#define FA_NVRAM1_ADDR_81 0xD0480 1716#define FA_NVRAM1_ADDR_81 0xD0180
1715#define FA_FEATURE_ADDR_81 0xD4000 1717#define FA_FEATURE_ADDR_81 0xD4000
1716#define FA_FLASH_DESCR_ADDR_81 0xD8000 1718#define FA_FLASH_DESCR_ADDR_81 0xD8000
1717#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 1719#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed9..65b12d82867c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
65extern int ql2xallocfwdump; 65extern int ql2xallocfwdump;
66extern int ql2xextended_error_logging; 66extern int ql2xextended_error_logging;
67extern int ql2xqfullrampup; 67extern int ql2xqfullrampup;
68extern int ql2xqfulltracking;
68extern int ql2xiidmaenable; 69extern int ql2xiidmaenable;
69extern int ql2xmaxqueues; 70extern int ql2xmaxqueues;
71extern int ql2xmultique_tag;
72extern int ql2xfwloadbin;
70 73
71extern int qla2x00_loop_reset(scsi_qla_host_t *); 74extern int qla2x00_loop_reset(scsi_qla_host_t *);
72extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 75extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
145extern int 148extern int
146qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 149qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
147 150
148extern void 151extern int
149qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, 152qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
150 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); 153 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
151 154
@@ -165,13 +168,13 @@ extern int
165qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 168qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
166 169
167extern int 170extern int
168qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 171qla2x00_abort_command(srb_t *);
169 172
170extern int 173extern int
171qla2x00_abort_target(struct fc_port *, unsigned int); 174qla2x00_abort_target(struct fc_port *, unsigned int, int);
172 175
173extern int 176extern int
174qla2x00_lun_reset(struct fc_port *, unsigned int); 177qla2x00_lun_reset(struct fc_port *, unsigned int, int);
175 178
176extern int 179extern int
177qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 180qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
236qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 239qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
237 dma_addr_t); 240 dma_addr_t);
238 241
239extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 242extern int qla24xx_abort_command(srb_t *);
240extern int qla24xx_abort_target(struct fc_port *, unsigned int); 243extern int
241extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 244qla24xx_abort_target(struct fc_port *, unsigned int, int);
245extern int
246qla24xx_lun_reset(struct fc_port *, unsigned int, int);
242 247
243extern int 248extern int
244qla2x00_system_error(scsi_qla_host_t *); 249qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
288extern int 293extern int
289qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); 294qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
290 295
296extern int
297qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
298
299extern int
300qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
301
302extern int
303qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
304
305extern int
306qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
307
291/* 308/*
292 * Global Function Prototypes in qla_isr.c source file. 309 * Global Function Prototypes in qla_isr.c source file.
293 */ 310 */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
295extern irqreturn_t qla2300_intr_handler(int, void *); 312extern irqreturn_t qla2300_intr_handler(int, void *);
296extern irqreturn_t qla24xx_intr_handler(int, void *); 313extern irqreturn_t qla24xx_intr_handler(int, void *);
297extern void qla2x00_process_response_queue(struct rsp_que *); 314extern void qla2x00_process_response_queue(struct rsp_que *);
298extern void qla24xx_process_response_queue(struct rsp_que *); 315extern void
299 316qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
300extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); 317extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
301extern void qla2x00_free_irqs(scsi_qla_host_t *); 318extern void qla2x00_free_irqs(scsi_qla_host_t *);
302 319
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
401extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); 418extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
402extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); 419extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
403extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, 420extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
404 uint16_t, uint8_t, uint8_t); 421 uint16_t, int, uint8_t);
405extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 422extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
406 uint16_t); 423 uint16_t, int);
407extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); 424extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
408extern void qla2x00_init_response_q_entries(struct rsp_que *); 425extern void qla2x00_init_response_q_entries(struct rsp_que *);
409extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 426extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
410extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); 427extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
411extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); 428extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
412extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); 429extern int qla25xx_delete_queues(struct scsi_qla_host *);
413extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 430extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
414extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 431extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
415extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 432extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
416extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 433extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
417extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 434extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
418extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 435extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
436extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
437
419#endif /* _QLA_GBL_H */ 438#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf88..917534b9f221 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1107 return ret; 1107 return ret;
1108 1108
1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1110 mb, BIT_1); 1110 mb, BIT_1|BIT_0);
1111 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1879 case BIT_13: 1879 case BIT_13:
1880 list[i].fp_speed = PORT_SPEED_4GB; 1880 list[i].fp_speed = PORT_SPEED_4GB;
1881 break; 1881 break;
1882 case BIT_12:
1883 list[i].fp_speed = PORT_SPEED_10GB;
1884 break;
1882 case BIT_11: 1885 case BIT_11:
1883 list[i].fp_speed = PORT_SPEED_8GB; 1886 list[i].fp_speed = PORT_SPEED_8GB;
1884 break; 1887 break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c0648..262026129325 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
634 goto chip_diag_failed; 634 goto chip_diag_failed;
635 635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 ha->host_no)); 637 vha->host_no));
638 638
639 /* Reset RISC processor. */ 639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
655 goto chip_diag_failed; 655 goto chip_diag_failed;
656 656
657 /* Check product ID of chip */ 657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); 658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659 659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw; 730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0]; 731 struct req_que *req = ha->req_q_map[0];
732 732
733 /* Perform RISC reset. */
734 qla24xx_reset_risc(vha);
735
736 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
737 734
738 rval = qla2x00_mbx_reg_test(vha); 735 rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
786 sizeof(uint32_t); 783 sizeof(uint32_t);
787 if (ha->mqenable) 784 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 785 mq_size = sizeof(struct qla2xxx_mq_chain);
789
790 /* Allocate memory for Fibre Channel Event Buffer. */ 786 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 788 goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
850 rsp_q_size = rsp->length * sizeof(response_t); 846 rsp_q_size = rsp->length * sizeof(response_t);
851 847
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
854 eft_size;
855 ha->chain_offset = dump_size; 850 ha->chain_offset = dump_size;
856 dump_size += mq_size + fce_size; 851 dump_size += mq_size + fce_size;
857 852
@@ -891,6 +886,56 @@ cont_alloc:
891 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
892} 887}
893 888
889static int
890qla81xx_mpi_sync(scsi_qla_host_t *vha)
891{
892#define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935done:
936 return rval;
937}
938
894/** 939/**
895 * qla2x00_setup_chip() - Load and start RISC firmware. 940 * qla2x00_setup_chip() - Load and start RISC firmware.
896 * @ha: HA context 941 * @ha: HA context
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
915 spin_unlock_irqrestore(&ha->hardware_lock, flags); 960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 } 961 }
917 962
963 qla81xx_mpi_sync(vha);
964
918 /* Load firmware sequences */ 965 /* Load firmware sequences */
919 rval = ha->isp_ops->load_risc(vha, &srisc_address); 966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
920 if (rval == QLA_SUCCESS) { 967 if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
931 /* Retrieve firmware information. */ 978 /* Retrieve firmware information. */
932 if (rval == QLA_SUCCESS) { 979 if (rval == QLA_SUCCESS) {
933 fw_major_version = ha->fw_major_version; 980 fw_major_version = ha->fw_major_version;
934 qla2x00_get_fw_version(vha, 981 rval = qla2x00_get_fw_version(vha,
935 &ha->fw_major_version, 982 &ha->fw_major_version,
936 &ha->fw_minor_version, 983 &ha->fw_minor_version,
937 &ha->fw_subminor_version, 984 &ha->fw_subminor_version,
938 &ha->fw_attributes, &ha->fw_memory_size, 985 &ha->fw_attributes, &ha->fw_memory_size,
939 ha->mpi_version, &ha->mpi_capabilities, 986 ha->mpi_version, &ha->mpi_capabilities,
940 ha->phy_version); 987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
941 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
942 if (IS_QLA2XXX_MIDTYPE(ha) && 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
943 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
989 ha->fw_subminor_version); 1039 ha->fw_subminor_version);
990 } 1040 }
991 } 1041 }
992 1042failed:
993 if (rval) { 1043 if (rval) {
994 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
995 vha->host_no)); 1045 vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
1013 uint16_t cnt; 1063 uint16_t cnt;
1014 response_t *pkt; 1064 response_t *pkt;
1015 1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1016 pkt = rsp->ring_ptr; 1069 pkt = rsp->ring_ptr;
1017 for (cnt = 0; cnt < rsp->length; cnt++) { 1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1018 pkt->signature = RESPONSE_PROCESSED; 1071 pkt->signature = RESPONSE_PROCESSED;
1019 pkt++; 1072 pkt++;
1020 } 1073 }
1021
1022} 1074}
1023 1075
1024/** 1076/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1176 if (ha->flags.msix_enabled) { 1228 if (ha->flags.msix_enabled) {
1177 msix = &ha->msix_entries[1]; 1229 msix = &ha->msix_entries[1];
1178 DEBUG2_17(printk(KERN_INFO 1230 DEBUG2_17(printk(KERN_INFO
1179 "Reistering vector 0x%x for base que\n", msix->entry)); 1231 "Registering vector 0x%x for base que\n", msix->entry));
1180 icb->msix = cpu_to_le16(msix->entry); 1232 icb->msix = cpu_to_le16(msix->entry);
1181 } 1233 }
1182 /* Use alternate PCI bus number */ 1234 /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 1283
1232 /* Clear outstanding commands array. */ 1284 /* Clear outstanding commands array. */
1233 for (que = 0; que < ha->max_queues; que++) { 1285 for (que = 0; que < ha->max_req_queues; que++) {
1234 req = ha->req_q_map[que]; 1286 req = ha->req_q_map[que];
1235 if (!req) 1287 if (!req)
1236 continue; 1288 continue;
1237 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1238 req->outstanding_cmds[cnt] = NULL; 1290 req->outstanding_cmds[cnt] = NULL;
1239 1291
1240 req->current_outstanding_cmd = 0; 1292 req->current_outstanding_cmd = 1;
1241 1293
1242 /* Initialize firmware. */ 1294 /* Initialize firmware. */
1243 req->ring_ptr = req->ring; 1295 req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1245 req->cnt = req->length; 1297 req->cnt = req->length;
1246 } 1298 }
1247 1299
1248 for (que = 0; que < ha->max_queues; que++) { 1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1249 rsp = ha->rsp_q_map[que]; 1301 rsp = ha->rsp_q_map[que];
1250 if (!rsp) 1302 if (!rsp)
1251 continue; 1303 continue;
1252 rsp->ring_ptr = rsp->ring;
1253 rsp->ring_index = 0;
1254
1255 /* Initialize response queue entries */ 1304 /* Initialize response queue entries */
1256 qla2x00_init_response_q_entries(rsp); 1305 qla2x00_init_response_q_entries(rsp);
1257 } 1306 }
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1307 unsigned long wtime, mtime, cs84xx_time; 1356 unsigned long wtime, mtime, cs84xx_time;
1308 uint16_t min_wait; /* Minimum wait time if loop is down */ 1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1309 uint16_t wait_time; /* Wait time if loop is coming ready */ 1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1310 uint16_t state[3]; 1359 uint16_t state[5];
1311 struct qla_hw_data *ha = vha->hw; 1360 struct qla_hw_data *ha = vha->hw;
1312 1361
1313 rval = QLA_SUCCESS; 1362 rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1406 vha->host_no, state[0], jiffies)); 1455 vha->host_no, state[0], jiffies));
1407 } while (1); 1456 } while (1);
1408 1457
1409 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1410 vha->host_no, state[0], jiffies)); 1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1411 1461
1412 if (rval) { 1462 if (rval) {
1413 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1541 char *st, *en; 1591 char *st, *en;
1542 uint16_t index; 1592 uint16_t index;
1543 struct qla_hw_data *ha = vha->hw; 1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1544 1595
1545 if (memcmp(model, BINZERO, len) != 0) { 1596 if (memcmp(model, BINZERO, len) != 0) {
1546 strncpy(ha->model_number, model, len); 1597 strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 } 1604 }
1554 1605
1555 index = (ha->pdev->subsystem_device & 0xff); 1606 index = (ha->pdev->subsystem_device & 0xff);
1556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1557 index < QLA_MODEL_NAMES) 1609 index < QLA_MODEL_NAMES)
1558 strncpy(ha->model_desc, 1610 strncpy(ha->model_desc,
1559 qla2x00_model_name[index * 2 + 1], 1611 qla2x00_model_name[index * 2 + 1],
1560 sizeof(ha->model_desc) - 1); 1612 sizeof(ha->model_desc) - 1);
1561 } else { 1613 } else {
1562 index = (ha->pdev->subsystem_device & 0xff); 1614 index = (ha->pdev->subsystem_device & 0xff);
1563 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1564 index < QLA_MODEL_NAMES) { 1617 index < QLA_MODEL_NAMES) {
1565 strcpy(ha->model_number, 1618 strcpy(ha->model_number,
1566 qla2x00_model_name[index * 2]); 1619 qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2066 } 2121 }
2067 2122
2068 return (rval); 2123 return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2110 goto cleanup_allocation; 2165 goto cleanup_allocation;
2111 2166
2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2113 ha->host_no, entries)); 2168 vha->host_no, entries));
2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2115 entries * sizeof(struct gid_list_info))); 2170 entries * sizeof(struct gid_list_info)));
2116 2171
@@ -2243,7 +2298,8 @@ static void
2243qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2298qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2244{ 2299{
2245#define LS_UNKNOWN 2 2300#define LS_UNKNOWN 2
2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2247 int rval; 2303 int rval;
2248 uint16_t mb[6]; 2304 uint16_t mb[6];
2249 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2266 fcport->port_name[6], fcport->port_name[7], rval, 2322 fcport->port_name[6], fcport->port_name[7], rval,
2267 fcport->fp_speed, mb[0], mb[1])); 2323 fcport->fp_speed, mb[0], mb[1]));
2268 } else { 2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2269 DEBUG2(qla_printk(KERN_INFO, ha, 2330 DEBUG2(qla_printk(KERN_INFO, ha,
2270 "iIDMA adjusted to %s GB/s on " 2331 "iIDMA adjusted to %s GB/s on "
2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2333 link_speed, fcport->port_name[0],
2273 fcport->port_name[1], fcport->port_name[2], 2334 fcport->port_name[1], fcport->port_name[2],
2274 fcport->port_name[3], fcport->port_name[4], 2335 fcport->port_name[3], fcport->port_name[4],
2275 fcport->port_name[5], fcport->port_name[6], 2336 fcport->port_name[5], fcport->port_name[6],
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3180{ 3241{
3181 int rval = QLA_SUCCESS; 3242 int rval = QLA_SUCCESS;
3182 uint32_t wait_time; 3243 uint32_t wait_time;
3183 struct qla_hw_data *ha = vha->hw; 3244 struct req_que *req;
3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3245 struct rsp_que *rsp;
3185 struct rsp_que *rsp = req->rsp; 3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3186 3252
3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3448 int ret = -1; 3514 int ret = -1;
3449 int i; 3515 int i;
3450 3516
3451 for (i = 1; i < ha->max_queues; i++) { 3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3452 rsp = ha->rsp_q_map[i]; 3518 rsp = ha->rsp_q_map[i];
3453 if (rsp) { 3519 if (rsp) {
3454 rsp->options &= ~BIT_0; 3520 rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3462 "%s Rsp que:%d inited\n", __func__, 3528 "%s Rsp que:%d inited\n", __func__,
3463 rsp->id)); 3529 rsp->id));
3464 } 3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3465 req = ha->req_q_map[i]; 3533 req = ha->req_q_map[i];
3466 if (req) { 3534 if (req) {
3467 /* Clear outstanding commands array. */ 3535 /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3566 nv = ha->nvram; 3634 nv = ha->nvram;
3567 3635
3568 /* Determine NVRAM starting address. */ 3636 /* Determine NVRAM starting address. */
3569 ha->nvram_size = sizeof(struct nvram_24xx); 3637 if (ha->flags.port0) {
3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3640 } else {
3573 if (PCI_FUNC(ha->pdev->devfn)) {
3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3576 } 3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3577 3646
3578 /* Get VPD data into cache */ 3647 /* Get VPD data into cache */
3579 ha->vpd = ha->nvram + VPD_OFFSET; 3648 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3588 chksum += le32_to_cpu(*dptr++); 3657 chksum += le32_to_cpu(*dptr++);
3589 3658
3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3592 3661
3593 /* Bad NVRAM data, set defaults parameters. */ 3662 /* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3612 nv->exchange_count = __constant_cpu_to_le16(0); 3681 nv->exchange_count = __constant_cpu_to_le16(0);
3613 nv->hard_address = __constant_cpu_to_le16(124); 3682 nv->hard_address = __constant_cpu_to_le16(124);
3614 nv->port_name[0] = 0x21; 3683 nv->port_name[0] = 0x21;
3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3684 nv->port_name[1] = 0x00 + ha->port_no;
3616 nv->port_name[2] = 0x00; 3685 nv->port_name[2] = 0x00;
3617 nv->port_name[3] = 0xe0; 3686 nv->port_name[3] = 0xe0;
3618 nv->port_name[4] = 0x8b; 3687 nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3798} 3867}
3799 3868
3800static int 3869static int
3801qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3870qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3802{ 3872{
3803 int rval = QLA_SUCCESS; 3873 int rval = QLA_SUCCESS;
3804 int segments, fragment; 3874 int segments, fragment;
3805 uint32_t faddr;
3806 uint32_t *dcode, dlen; 3875 uint32_t *dcode, dlen;
3807 uint32_t risc_addr; 3876 uint32_t risc_addr;
3808 uint32_t risc_size; 3877 uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3811 struct req_que *req = ha->req_q_map[0]; 3880 struct req_que *req = ha->req_q_map[0];
3812 3881
3813 qla_printk(KERN_INFO, ha, 3882 qla_printk(KERN_INFO, ha,
3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3883 "FW: Loading from flash (%x)...\n", faddr);
3815 3884
3816 rval = QLA_SUCCESS; 3885 rval = QLA_SUCCESS;
3817 3886
3818 segments = FA_RISC_CODE_SEGMENTS; 3887 segments = FA_RISC_CODE_SEGMENTS;
3819 faddr = ha->flt_region_fw;
3820 dcode = (uint32_t *)req->ring; 3888 dcode = (uint32_t *)req->ring;
3821 *srisc_addr = 0; 3889 *srisc_addr = 0;
3822 3890
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4104{ 4172{
4105 int rval; 4173 int rval;
4106 4174
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4107 /* 4178 /*
4108 * FW Load priority: 4179 * FW Load priority:
4109 * 1) Firmware via request-firmware interface (.bin file). 4180 * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4113 if (rval == QLA_SUCCESS) 4184 if (rval == QLA_SUCCESS)
4114 return rval; 4185 return rval;
4115 4186
4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4117} 4189}
4118 4190
4119int 4191int
4120qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4192qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4121{ 4193{
4122 int rval; 4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4123 4199
4124 /* 4200 /*
4125 * FW Load priority: 4201 * FW Load priority:
4126 * 1) Firmware residing in flash. 4202 * 1) Firmware residing in flash.
4127 * 2) Firmware via request-firmware interface (.bin file). 4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4128 */ 4205 */
4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4130 if (rval == QLA_SUCCESS) 4207 if (rval == QLA_SUCCESS)
4131 return rval; 4208 return rval;
4132 4209
4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4210try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4134} 4226}
4135 4227
4136void 4228void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4146 4238
4147 ret = qla2x00_stop_firmware(vha); 4239 ret = qla2x00_stop_firmware(vha);
4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4149 retries ; retries--) { 4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4150 ha->isp_ops->reset_chip(vha); 4242 ha->isp_ops->reset_chip(vha);
4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4152 continue; 4244 continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4166 struct qla_hw_data *ha = vha->hw; 4258 struct qla_hw_data *ha = vha->hw;
4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4260 struct req_que *req;
4169 struct rsp_que *rsp = req->rsp; 4261 struct rsp_que *rsp;
4170 4262
4171 if (!vha->vp_idx) 4263 if (!vha->vp_idx)
4172 return -EINVAL; 4264 return -EINVAL;
4173 4265
4174 rval = qla2x00_fw_ready(base_vha); 4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4175 if (rval == QLA_SUCCESS) { 4273 if (rval == QLA_SUCCESS) {
4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4305 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306 chksum += le32_to_cpu(*dptr++); 4404 chksum += le32_to_cpu(*dptr++);
4307 4405
4308 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 4408
4311 /* Bad NVRAM data, set defaults parameters. */ 4409 /* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4330 nv->exchange_count = __constant_cpu_to_le16(0); 4428 nv->exchange_count = __constant_cpu_to_le16(0);
4331 nv->port_name[0] = 0x21; 4429 nv->port_name[0] = 0x21;
4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4430 nv->port_name[1] = 0x00 + ha->port_no;
4333 nv->port_name[2] = 0x00; 4431 nv->port_name[2] = 0x00;
4334 nv->port_name[3] = 0xe0; 4432 nv->port_name[3] = 0xe0;
4335 nv->port_name[4] = 0x8b; 4433 nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4361 nv->enode_mac[0] = 0x01; 4459 nv->enode_mac[0] = 0x00;
4362 nv->enode_mac[1] = 0x02; 4460 nv->enode_mac[1] = 0x02;
4363 nv->enode_mac[2] = 0x03; 4461 nv->enode_mac[2] = 0x03;
4364 nv->enode_mac[3] = 0x04; 4462 nv->enode_mac[3] = 0x04;
4365 nv->enode_mac[4] = 0x05; 4463 nv->enode_mac[4] = 0x05;
4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4367 4465
4368 rval = 1; 4466 rval = 1;
4369 } 4467 }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4396 icb->enode_mac[2] = 0x03; 4494 icb->enode_mac[2] = 0x03;
4397 icb->enode_mac[3] = 0x04; 4495 icb->enode_mac[3] = 0x04;
4398 icb->enode_mac[4] = 0x05; 4496 icb->enode_mac[4] = 0x05;
4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4400 } 4498 }
4401 4499
4402 /* Use extended-initialization control block. */ 4500 /* Use extended-initialization control block. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730d..13396beae2ce 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 17
18static void qla25xx_set_que(srb_t *, struct rsp_que **);
18/** 19/**
19 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 20 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * @cmd: SCSI command 21 * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
92 * Returns a pointer to the Continuation Type 0 IOCB packet. 93 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */ 94 */
94static inline cont_entry_t * 95static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) 96qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{ 97{
97 cont_entry_t *cont_pkt; 98 cont_entry_t *cont_pkt;
99 struct req_que *req = vha->req;
98 /* Adjust ring index. */ 100 /* Adjust ring index. */
99 req->ring_index++; 101 req->ring_index++;
100 if (req->ring_index == req->length) { 102 if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 122 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 123 */
122static inline cont_a64_entry_t * 124static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) 125qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 126{
125 cont_a64_entry_t *cont_pkt; 127 cont_a64_entry_t *cont_pkt;
126 128
129 struct req_que *req = vha->req;
127 /* Adjust ring index. */ 130 /* Adjust ring index. */
128 req->ring_index++; 131 req->ring_index++;
129 if (req->ring_index == req->length) { 132 if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
159 struct scsi_cmnd *cmd; 162 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 163 struct scatterlist *sg;
161 int i; 164 int i;
162 struct req_que *req;
163 165
164 cmd = sp->cmd; 166 cmd = sp->cmd;
165 167
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
174 } 176 }
175 177
176 vha = sp->fcport->vha; 178 vha = sp->fcport->vha;
177 req = sp->que;
178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180 180
181 /* Three DSDs are available in the Command Type 2 IOCB */ 181 /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB. 193 * Type 0 IOCB.
194 */ 194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7; 197 avail_dsds = 7;
198 } 198 }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
220 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
221 struct scatterlist *sg; 221 struct scatterlist *sg;
222 int i; 222 int i;
223 struct req_que *req;
224 223
225 cmd = sp->cmd; 224 cmd = sp->cmd;
226 225
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
235 } 234 }
236 235
237 vha = sp->fcport->vha; 236 vha = sp->fcport->vha;
238 req = sp->que;
239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
241 238
242 /* Two DSDs are available in the Command Type 3 IOCB */ 239 /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
254 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
255 * Type 1 IOCB. 252 * Type 1 IOCB.
256 */ 253 */
257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
259 avail_dsds = 5; 256 avail_dsds = 5;
260 } 257 }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 350 /* Build command packet */
354 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
356 sp->que = req;
357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
358 req->cnt -= req_cnt; 354 req->cnt -= req_cnt;
359 355
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
453 mrk24->lun[2] = MSB(lun); 449 mrk24->lun[2] = MSB(lun);
454 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 450 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
455 mrk24->vp_index = vha->vp_idx; 451 mrk24->vp_index = vha->vp_idx;
452 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
456 } else { 453 } else {
457 SET_TARGET_ID(ha, mrk->target, loop_id); 454 SET_TARGET_ID(ha, mrk->target, loop_id);
458 mrk->lun = cpu_to_le16(lun); 455 mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
531 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) 528 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
532 *dword_ptr++ = 0; 529 *dword_ptr++ = 0;
533 530
534 /* Set system defined field. */
535 pkt->sys_define = (uint8_t)req->ring_index;
536
537 /* Set entry count. */ 531 /* Set entry count. */
538 pkt->entry_count = 1; 532 pkt->entry_count = 1;
539 533
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 } 650 }
657 651
658 vha = sp->fcport->vha; 652 vha = sp->fcport->vha;
659 req = sp->que; 653 req = vha->req;
660 654
661 /* Set transfer direction */ 655 /* Set transfer direction */
662 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 656 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
687 * Five DSDs are available in the Continuation 681 * Five DSDs are available in the Continuation
688 * Type 1 IOCB. 682 * Type 1 IOCB.
689 */ 683 */
690 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 684 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
691 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 685 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
692 avail_dsds = 5; 686 avail_dsds = 5;
693 } 687 }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
724 struct scsi_cmnd *cmd = sp->cmd; 718 struct scsi_cmnd *cmd = sp->cmd;
725 struct scsi_qla_host *vha = sp->fcport->vha; 719 struct scsi_qla_host *vha = sp->fcport->vha;
726 struct qla_hw_data *ha = vha->hw; 720 struct qla_hw_data *ha = vha->hw;
727 uint16_t que_id;
728 721
729 /* Setup device pointers. */ 722 /* Setup device pointers. */
730 ret = 0; 723 ret = 0;
731 que_id = vha->req_ques[0];
732 724
733 req = ha->req_q_map[que_id]; 725 qla25xx_set_que(sp, &rsp);
734 sp->que = req; 726 req = vha->req;
735 727
736 if (req->rsp)
737 rsp = req->rsp;
738 else
739 rsp = ha->rsp_q_map[que_id];
740 /* So we know we haven't pci_map'ed anything yet */ 728 /* So we know we haven't pci_map'ed anything yet */
741 tot_dsds = 0; 729 tot_dsds = 0;
742 730
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
794 req->cnt -= req_cnt; 782 req->cnt -= req_cnt;
795 783
796 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 784 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
797 cmd_pkt->handle = handle; 785 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
798 786
799 /* Zero out remaining portion of packet. */ 787 /* Zero out remaining portion of packet. */
800 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 788 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
823 811
824 /* Set total data segment count. */ 812 /* Set total data segment count. */
825 cmd_pkt->entry_count = (uint8_t)req_cnt; 813 cmd_pkt->entry_count = (uint8_t)req_cnt;
814 /* Specify response queue number where completion should happen */
815 cmd_pkt->entry_status = (uint8_t) rsp->id;
826 wmb(); 816 wmb();
827 817
828 /* Adjust ring index. */ 818 /* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
842 /* Manage unprocessed RIO/ZIO commands in response queue. */ 832 /* Manage unprocessed RIO/ZIO commands in response queue. */
843 if (vha->flags.process_response_queue && 833 if (vha->flags.process_response_queue &&
844 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 834 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
845 qla24xx_process_response_queue(rsp); 835 qla24xx_process_response_queue(vha, rsp);
846 836
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848 return QLA_SUCCESS; 838 return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
855 845
856 return QLA_FUNCTION_FAILED; 846 return QLA_FUNCTION_FAILED;
857} 847}
848
849static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
850{
851 struct scsi_cmnd *cmd = sp->cmd;
852 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu;
854
855 if (ql2xmultique_tag && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1];
858 else
859 *rsp = ha->rsp_q_map[0];
860}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e56..c8d0a176fea4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *); 18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20 19
21/** 20/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 21 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
51 status = 0; 50 status = 0;
52 51
53 spin_lock(&ha->hardware_lock); 52 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp); 53 vha = pci_get_drvdata(ha->pdev);
55 for (iter = 50; iter--; ) { 54 for (iter = 50; iter--; ) {
56 hccr = RD_REG_WORD(&reg->hccr); 55 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) { 56 if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
147 status = 0; 146 status = 0;
148 147
149 spin_lock(&ha->hardware_lock); 148 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp); 149 vha = pci_get_drvdata(ha->pdev);
151 for (iter = 50; iter--; ) { 150 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 151 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) { 152 if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
685 vha->host_no)); 684 vha->host_no));
686 685
687 if (IS_FWI2_CAPABLE(ha)) 686 if (IS_FWI2_CAPABLE(ha))
688 qla24xx_process_response_queue(rsp); 687 qla24xx_process_response_queue(vha, rsp);
689 else 688 else
690 qla2x00_process_response_queue(rsp); 689 qla2x00_process_response_queue(rsp);
691 break; 690 break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
766 struct qla_hw_data *ha = vha->hw; 765 struct qla_hw_data *ha = vha->hw;
767 struct req_que *req = NULL; 766 struct req_que *req = NULL;
768 767
769 req = ha->req_q_map[vha->req_ques[0]]; 768 if (!ql2xqfulltracking)
769 return;
770
771 req = vha->req;
770 if (!req) 772 if (!req)
771 return; 773 return;
772 if (req->max_q_depth <= sdev->queue_depth) 774 if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
808 fc_port_t *fcport; 810 fc_port_t *fcport;
809 struct scsi_device *sdev; 811 struct scsi_device *sdev;
810 812
813 if (!ql2xqfulltracking)
814 return;
815
811 sdev = sp->cmd->device; 816 sdev = sp->cmd->device;
812 if (sdev->queue_depth >= req->max_q_depth) 817 if (sdev->queue_depth >= req->max_q_depth)
813 return; 818 return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
858 qla2x00_ramp_up_queue_depth(vha, req, sp); 863 qla2x00_ramp_up_queue_depth(vha, req, sp);
859 qla2x00_sp_compl(ha, sp); 864 qla2x00_sp_compl(ha, sp);
860 } else { 865 } else {
861 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 866 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
862 vha->host_no)); 867 " handle(%d)\n", vha->host_no, req->id, index));
863 qla_printk(KERN_WARNING, ha, 868 qla_printk(KERN_WARNING, ha,
864 "Invalid ISP SCSI completion handle\n"); 869 "Invalid ISP SCSI completion handle\n");
865 870
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
881 uint16_t handle_cnt; 886 uint16_t handle_cnt;
882 uint16_t cnt; 887 uint16_t cnt;
883 888
884 vha = qla2x00_get_rsp_host(rsp); 889 vha = pci_get_drvdata(ha->pdev);
885 890
886 if (!vha->flags.online) 891 if (!vha->flags.online)
887 return; 892 return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
926 } 931 }
927 break; 932 break;
928 case STATUS_CONT_TYPE: 933 case STATUS_CONT_TYPE:
929 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 934 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
930 break; 935 break;
931 default: 936 default:
932 /* Type Not Supported. */ 937 /* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
945} 950}
946 951
947static inline void 952static inline void
948qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) 953qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
954 struct rsp_que *rsp)
949{ 955{
950 struct scsi_cmnd *cp = sp->cmd; 956 struct scsi_cmnd *cp = sp->cmd;
951 957
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
962 sp->request_sense_ptr += sense_len; 968 sp->request_sense_ptr += sense_len;
963 sp->request_sense_length -= sense_len; 969 sp->request_sense_length -= sense_len;
964 if (sp->request_sense_length != 0) 970 if (sp->request_sense_length != 0)
965 sp->fcport->vha->status_srb = sp; 971 rsp->status_srb = sp;
966 972
967 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 973 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
968 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 974 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 998 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
993 uint8_t *rsp_info, *sense_data; 999 uint8_t *rsp_info, *sense_data;
994 struct qla_hw_data *ha = vha->hw; 1000 struct qla_hw_data *ha = vha->hw;
995 struct req_que *req = rsp->req; 1001 uint32_t handle;
1002 uint16_t que;
1003 struct req_que *req;
996 1004
997 sts = (sts_entry_t *) pkt; 1005 sts = (sts_entry_t *) pkt;
998 sts24 = (struct sts_entry_24xx *) pkt; 1006 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1003 comp_status = le16_to_cpu(sts->comp_status); 1011 comp_status = le16_to_cpu(sts->comp_status);
1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1012 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1005 } 1013 }
1006 1014 handle = (uint32_t) LSW(sts->handle);
1015 que = MSW(sts->handle);
1016 req = ha->req_q_map[que];
1007 /* Fast path completion. */ 1017 /* Fast path completion. */
1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1018 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1009 qla2x00_process_completed_request(vha, req, sts->handle); 1019 qla2x00_process_completed_request(vha, req, handle);
1010 1020
1011 return; 1021 return;
1012 } 1022 }
1013 1023
1014 /* Validate handle. */ 1024 /* Validate handle. */
1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1025 if (handle < MAX_OUTSTANDING_COMMANDS) {
1016 sp = req->outstanding_cmds[sts->handle]; 1026 sp = req->outstanding_cmds[handle];
1017 req->outstanding_cmds[sts->handle] = NULL; 1027 req->outstanding_cmds[handle] = NULL;
1018 } else 1028 } else
1019 sp = NULL; 1029 sp = NULL;
1020 1030
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1030 cp = sp->cmd; 1040 cp = sp->cmd;
1031 if (cp == NULL) { 1041 if (cp == NULL) {
1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1042 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1043 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1034 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1045 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1036 1046
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1121 scsi_status)); 1131 scsi_status));
1122 1132
1123 /* Adjust queue depth for all luns on the port. */ 1133 /* Adjust queue depth for all luns on the port. */
1134 if (!ql2xqfulltracking)
1135 break;
1124 fcport->last_queue_full = jiffies; 1136 fcport->last_queue_full = jiffies;
1125 starget_for_each_device(cp->device->sdev_target, 1137 starget_for_each_device(cp->device->sdev_target,
1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1138 fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1145 if (!(scsi_status & SS_SENSE_LEN_VALID))
1134 break; 1146 break;
1135 1147
1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1148 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1137 break; 1149 break;
1138 1150
1139 case CS_DATA_UNDERRUN: 1151 case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1179 * Adjust queue depth for all luns on the 1191 * Adjust queue depth for all luns on the
1180 * port. 1192 * port.
1181 */ 1193 */
1194 if (!ql2xqfulltracking)
1195 break;
1182 fcport->last_queue_full = jiffies; 1196 fcport->last_queue_full = jiffies;
1183 starget_for_each_device( 1197 starget_for_each_device(
1184 cp->device->sdev_target, fcport, 1198 cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1206 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193 break; 1207 break;
1194 1208
1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1209 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196 } else { 1210 } else {
1197 /* 1211 /*
1198 * If RISC reports underrun and target does not report 1212 * If RISC reports underrun and target does not report
1199 * it then we must have a lost frame, so tell upper 1213 * it then we must have a lost frame, so tell upper
1200 * layer to retry it by reporting a bus busy. 1214 * layer to retry it by reporting an error.
1201 */ 1215 */
1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1216 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1217 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1207 cp->device->id, cp->device->lun, resid, 1221 cp->device->id, cp->device->lun, resid,
1208 scsi_bufflen(cp))); 1222 scsi_bufflen(cp)));
1209 1223
1210 cp->result = DID_BUS_BUSY << 16; 1224 cp->result = DID_ERROR << 16;
1211 break; 1225 break;
1212 } 1226 }
1213 1227
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1334 } 1348 }
1335 1349
1336 /* Place command on done queue. */ 1350 /* Place command on done queue. */
1337 if (vha->status_srb == NULL) 1351 if (rsp->status_srb == NULL)
1338 qla2x00_sp_compl(ha, sp); 1352 qla2x00_sp_compl(ha, sp);
1339} 1353}
1340 1354
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1346 * Extended sense data. 1360 * Extended sense data.
1347 */ 1361 */
1348static void 1362static void
1349qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1363qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1350{ 1364{
1351 uint8_t sense_sz = 0; 1365 uint8_t sense_sz = 0;
1352 struct qla_hw_data *ha = vha->hw; 1366 struct qla_hw_data *ha = rsp->hw;
1353 srb_t *sp = vha->status_srb; 1367 srb_t *sp = rsp->status_srb;
1354 struct scsi_cmnd *cp; 1368 struct scsi_cmnd *cp;
1355 1369
1356 if (sp != NULL && sp->request_sense_length != 0) { 1370 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1376 "cmd is NULL: already returned to OS (sp=%p)\n",
1363 sp); 1377 sp);
1364 1378
1365 vha->status_srb = NULL; 1379 rsp->status_srb = NULL;
1366 return; 1380 return;
1367 } 1381 }
1368 1382
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1383 1397
1384 /* Place command on done queue. */ 1398 /* Place command on done queue. */
1385 if (sp->request_sense_length == 0) { 1399 if (sp->request_sense_length == 0) {
1386 vha->status_srb = NULL; 1400 rsp->status_srb = NULL;
1387 qla2x00_sp_compl(ha, sp); 1401 qla2x00_sp_compl(ha, sp);
1388 } 1402 }
1389 } 1403 }
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1399{ 1413{
1400 srb_t *sp; 1414 srb_t *sp;
1401 struct qla_hw_data *ha = vha->hw; 1415 struct qla_hw_data *ha = vha->hw;
1402 struct req_que *req = rsp->req; 1416 uint32_t handle = LSW(pkt->handle);
1417 uint16_t que = MSW(pkt->handle);
1418 struct req_que *req = ha->req_q_map[que];
1403#if defined(QL_DEBUG_LEVEL_2) 1419#if defined(QL_DEBUG_LEVEL_2)
1404 if (pkt->entry_status & RF_INV_E_ORDER) 1420 if (pkt->entry_status & RF_INV_E_ORDER)
1405 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1421 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1417#endif 1433#endif
1418 1434
1419 /* Validate handle. */ 1435 /* Validate handle. */
1420 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1436 if (handle < MAX_OUTSTANDING_COMMANDS)
1421 sp = req->outstanding_cmds[pkt->handle]; 1437 sp = req->outstanding_cmds[handle];
1422 else 1438 else
1423 sp = NULL; 1439 sp = NULL;
1424 1440
1425 if (sp) { 1441 if (sp) {
1426 /* Free outstanding command slot. */ 1442 /* Free outstanding command slot. */
1427 req->outstanding_cmds[pkt->handle] = NULL; 1443 req->outstanding_cmds[handle] = NULL;
1428 1444
1429 /* Bad payload or header */ 1445 /* Bad payload or header */
1430 if (pkt->entry_status & 1446 if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1486 * qla24xx_process_response_queue() - Process response queue entries. 1502 * qla24xx_process_response_queue() - Process response queue entries.
1487 * @ha: SCSI driver HA context 1503 * @ha: SCSI driver HA context
1488 */ 1504 */
1489void 1505void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1490qla24xx_process_response_queue(struct rsp_que *rsp) 1506 struct rsp_que *rsp)
1491{ 1507{
1492 struct sts_entry_24xx *pkt; 1508 struct sts_entry_24xx *pkt;
1493 struct scsi_qla_host *vha;
1494
1495 vha = qla2x00_get_rsp_host(rsp);
1496 1509
1497 if (!vha->flags.online) 1510 if (!vha->flags.online)
1498 return; 1511 return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
1523 qla2x00_status_entry(vha, rsp, pkt); 1536 qla2x00_status_entry(vha, rsp, pkt);
1524 break; 1537 break;
1525 case STATUS_CONT_TYPE: 1538 case STATUS_CONT_TYPE:
1526 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 1539 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1527 break; 1540 break;
1528 case VP_RPT_ID_IOCB_TYPE: 1541 case VP_RPT_ID_IOCB_TYPE:
1529 qla24xx_report_id_acquisition(vha, 1542 qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1626 status = 0; 1639 status = 0;
1627 1640
1628 spin_lock(&ha->hardware_lock); 1641 spin_lock(&ha->hardware_lock);
1629 vha = qla2x00_get_rsp_host(rsp); 1642 vha = pci_get_drvdata(ha->pdev);
1630 for (iter = 50; iter--; ) { 1643 for (iter = 50; iter--; ) {
1631 stat = RD_REG_DWORD(&reg->host_status); 1644 stat = RD_REG_DWORD(&reg->host_status);
1632 if (stat & HSRX_RISC_PAUSED) { 1645 if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1664 break; 1677 break;
1665 case 0x13: 1678 case 0x13:
1666 case 0x14: 1679 case 0x14:
1667 qla24xx_process_response_queue(rsp); 1680 qla24xx_process_response_queue(vha, rsp);
1668 break; 1681 break;
1669 default: 1682 default:
1670 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1683 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1692 struct qla_hw_data *ha; 1705 struct qla_hw_data *ha;
1693 struct rsp_que *rsp; 1706 struct rsp_que *rsp;
1694 struct device_reg_24xx __iomem *reg; 1707 struct device_reg_24xx __iomem *reg;
1708 struct scsi_qla_host *vha;
1695 1709
1696 rsp = (struct rsp_que *) dev_id; 1710 rsp = (struct rsp_que *) dev_id;
1697 if (!rsp) { 1711 if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1704 1718
1705 spin_lock_irq(&ha->hardware_lock); 1719 spin_lock_irq(&ha->hardware_lock);
1706 1720
1707 qla24xx_process_response_queue(rsp); 1721 vha = qla25xx_get_host(rsp);
1722 qla24xx_process_response_queue(vha, rsp);
1708 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1723 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1709 1724
1710 spin_unlock_irq(&ha->hardware_lock); 1725 spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1717{ 1732{
1718 struct qla_hw_data *ha; 1733 struct qla_hw_data *ha;
1719 struct rsp_que *rsp; 1734 struct rsp_que *rsp;
1720 struct device_reg_24xx __iomem *reg;
1721 1735
1722 rsp = (struct rsp_que *) dev_id; 1736 rsp = (struct rsp_que *) dev_id;
1723 if (!rsp) { 1737 if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1726 return IRQ_NONE; 1740 return IRQ_NONE;
1727 } 1741 }
1728 ha = rsp->hw; 1742 ha = rsp->hw;
1729 reg = &ha->iobase->isp24;
1730 1743
1731 spin_lock_irq(&ha->hardware_lock); 1744 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1732
1733 qla24xx_process_response_queue(rsp);
1734
1735 spin_unlock_irq(&ha->hardware_lock);
1736 1745
1737 return IRQ_HANDLED; 1746 return IRQ_HANDLED;
1738} 1747}
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1760 status = 0; 1769 status = 0;
1761 1770
1762 spin_lock_irq(&ha->hardware_lock); 1771 spin_lock_irq(&ha->hardware_lock);
1763 vha = qla2x00_get_rsp_host(rsp); 1772 vha = pci_get_drvdata(ha->pdev);
1764 do { 1773 do {
1765 stat = RD_REG_DWORD(&reg->host_status); 1774 stat = RD_REG_DWORD(&reg->host_status);
1766 if (stat & HSRX_RISC_PAUSED) { 1775 if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1798 break; 1807 break;
1799 case 0x13: 1808 case 0x13:
1800 case 0x14: 1809 case 0x14:
1801 qla24xx_process_response_queue(rsp); 1810 qla24xx_process_response_queue(vha, rsp);
1802 break; 1811 break;
1803 default: 1812 default:
1804 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1813 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
1822/* Interrupt handling helpers. */ 1831/* Interrupt handling helpers. */
1823 1832
1824struct qla_init_msix_entry { 1833struct qla_init_msix_entry {
1825 uint16_t entry;
1826 uint16_t index;
1827 const char *name; 1834 const char *name;
1828 irq_handler_t handler; 1835 irq_handler_t handler;
1829}; 1836};
1830 1837
1831static struct qla_init_msix_entry base_queue = { 1838static struct qla_init_msix_entry msix_entries[3] = {
1832 .entry = 0, 1839 { "qla2xxx (default)", qla24xx_msix_default },
1833 .index = 0, 1840 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1834 .name = "qla2xxx (default)", 1841 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1835 .handler = qla24xx_msix_default,
1836};
1837
1838static struct qla_init_msix_entry base_rsp_queue = {
1839 .entry = 1,
1840 .index = 1,
1841 .name = "qla2xxx (rsp_q)",
1842 .handler = qla24xx_msix_rsp_q,
1843};
1844
1845static struct qla_init_msix_entry multi_rsp_queue = {
1846 .entry = 1,
1847 .index = 1,
1848 .name = "qla2xxx (multi_q)",
1849 .handler = qla25xx_msix_rsp_q,
1850}; 1842};
1851 1843
1852static void 1844static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1873 int i, ret; 1865 int i, ret;
1874 struct msix_entry *entries; 1866 struct msix_entry *entries;
1875 struct qla_msix_entry *qentry; 1867 struct qla_msix_entry *qentry;
1876 struct qla_init_msix_entry *msix_queue;
1877 1868
1878 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 1869 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1879 GFP_KERNEL); 1870 GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
1900 ha->msix_count, ret); 1891 ha->msix_count, ret);
1901 goto msix_out; 1892 goto msix_out;
1902 } 1893 }
1903 ha->max_queues = ha->msix_count - 1; 1894 ha->max_rsp_queues = ha->msix_count - 1;
1904 } 1895 }
1905 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 1896 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1906 ha->msix_count, GFP_KERNEL); 1897 ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
1918 qentry->rsp = NULL; 1909 qentry->rsp = NULL;
1919 } 1910 }
1920 1911
1921 /* Enable MSI-X for AENs for queue 0 */ 1912 /* Enable MSI-X vectors for the base queue */
1922 qentry = &ha->msix_entries[0]; 1913 for (i = 0; i < 2; i++) {
1923 ret = request_irq(qentry->vector, base_queue.handler, 0, 1914 qentry = &ha->msix_entries[i];
1924 base_queue.name, rsp); 1915 ret = request_irq(qentry->vector, msix_entries[i].handler,
1925 if (ret) { 1916 0, msix_entries[i].name, rsp);
1926 qla_printk(KERN_WARNING, ha, 1917 if (ret) {
1918 qla_printk(KERN_WARNING, ha,
1927 "MSI-X: Unable to register handler -- %x/%d.\n", 1919 "MSI-X: Unable to register handler -- %x/%d.\n",
1928 qentry->vector, ret); 1920 qentry->vector, ret);
1929 qla24xx_disable_msix(ha); 1921 qla24xx_disable_msix(ha);
1930 goto msix_out; 1922 ha->mqenable = 0;
1923 goto msix_out;
1924 }
1925 qentry->have_irq = 1;
1926 qentry->rsp = rsp;
1927 rsp->msix = qentry;
1931 } 1928 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934 1929
1935 /* Enable MSI-X vector for response queue update for queue 0 */ 1930 /* Enable MSI-X vector for response queue update for queue 0 */
1936 if (ha->max_queues > 1 && ha->mqiobase) { 1931 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1937 ha->mqenable = 1; 1932 ha->mqenable = 1;
1938 msix_queue = &multi_rsp_queue;
1939 qla_printk(KERN_INFO, ha,
1940 "MQ enabled, Number of Queue Resources: %d \n",
1941 ha->max_queues);
1942 } else {
1943 ha->mqenable = 0;
1944 msix_queue = &base_rsp_queue;
1945 }
1946
1947 qentry = &ha->msix_entries[1];
1948 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1949 msix_queue->name, rsp);
1950 if (ret) {
1951 qla_printk(KERN_WARNING, ha,
1952 "MSI-X: Unable to register handler -- %x/%d.\n",
1953 qentry->vector, ret);
1954 qla24xx_disable_msix(ha);
1955 ha->mqenable = 0;
1956 goto msix_out;
1957 }
1958 qentry->have_irq = 1;
1959 qentry->rsp = rsp;
1960 1933
1961msix_out: 1934msix_out:
1962 kfree(entries); 1935 kfree(entries);
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2063 } 2036 }
2064} 2037}
2065 2038
2066static struct scsi_qla_host *
2067qla2x00_get_rsp_host(struct rsp_que *rsp)
2068{
2069 srb_t *sp;
2070 struct qla_hw_data *ha = rsp->hw;
2071 struct scsi_qla_host *vha = NULL;
2072 struct sts_entry_24xx *pkt;
2073 struct req_que *req;
2074
2075 if (rsp->id) {
2076 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2077 req = rsp->req;
2078 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2079 sp = req->outstanding_cmds[pkt->handle];
2080 if (sp)
2081 vha = sp->fcport->vha;
2082 }
2083 }
2084 if (!vha)
2085 /* handle it in base queue */
2086 vha = pci_get_drvdata(ha->pdev);
2087
2088 return vha;
2089}
2090 2039
2091int qla25xx_request_irq(struct rsp_que *rsp) 2040int qla25xx_request_irq(struct rsp_que *rsp)
2092{ 2041{
2093 struct qla_hw_data *ha = rsp->hw; 2042 struct qla_hw_data *ha = rsp->hw;
2094 struct qla_init_msix_entry *intr = &multi_rsp_queue; 2043 struct qla_init_msix_entry *intr = &msix_entries[2];
2095 struct qla_msix_entry *msix = rsp->msix; 2044 struct qla_msix_entry *msix = rsp->msix;
2096 int ret; 2045 int ret;
2097 2046
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2106 msix->rsp = rsp; 2055 msix->rsp = rsp;
2107 return ret; 2056 return ret;
2108} 2057}
2058
2059struct scsi_qla_host *
2060qla25xx_get_host(struct rsp_que *rsp)
2061{
2062 srb_t *sp;
2063 struct qla_hw_data *ha = rsp->hw;
2064 struct scsi_qla_host *vha = NULL;
2065 struct sts_entry_24xx *pkt;
2066 struct req_que *req;
2067 uint16_t que;
2068 uint32_t handle;
2069
2070 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071 que = MSW(pkt->handle);
2072 handle = (uint32_t) LSW(pkt->handle);
2073 req = ha->req_q_map[que];
2074 if (handle < MAX_OUTSTANDING_COMMANDS) {
2075 sp = req->outstanding_cmds[handle];
2076 if (sp)
2077 return sp->fcport->vha;
2078 else
2079 goto base_que;
2080 }
2081base_que:
2082 vha = pci_get_drvdata(ha->pdev);
2083 return vha;
2084}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf46..451ece0760b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
408 * Context: 408 * Context:
409 * Kernel context. 409 * Kernel context.
410 */ 410 */
411void 411int
412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, 413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
414 uint32_t *mpi_caps, uint8_t *phy) 414 uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
427 mcp->flags = 0; 427 mcp->flags = 0;
428 mcp->tov = MBX_TOV_SECONDS; 428 mcp->tov = MBX_TOV_SECONDS;
429 rval = qla2x00_mailbox_command(vha, mcp); 429 rval = qla2x00_mailbox_command(vha, mcp);
430 if (rval != QLA_SUCCESS)
431 goto failed;
430 432
431 /* Return mailbox data. */ 433 /* Return mailbox data. */
432 *major = mcp->mb[1]; 434 *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
446 phy[1] = mcp->mb[9] >> 8; 448 phy[1] = mcp->mb[9] >> 8;
447 phy[2] = mcp->mb[9] & 0xff; 449 phy[2] = mcp->mb[9] & 0xff;
448 } 450 }
449 451failed:
450 if (rval != QLA_SUCCESS) { 452 if (rval != QLA_SUCCESS) {
451 /*EMPTY*/ 453 /*EMPTY*/
452 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 454 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
455 /*EMPTY*/ 457 /*EMPTY*/
456 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 458 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
457 } 459 }
460 return rval;
458} 461}
459 462
460/* 463/*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 751 * Kernel context.
749 */ 752 */
750int 753int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 754qla2x00_abort_command(srb_t *sp)
752{ 755{
753 unsigned long flags = 0; 756 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 757 int rval;
756 uint32_t handle = 0; 758 uint32_t handle = 0;
757 mbx_cmd_t mc; 759 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 760 mbx_cmd_t *mcp = &mc;
761 fc_port_t *fcport = sp->fcport;
762 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = vha->req;
760 765
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 766 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 767
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 768 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 769 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 770 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 803}
801 804
802int 805int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 806qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 807{
805 int rval, rval2; 808 int rval, rval2;
806 mbx_cmd_t mc; 809 mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 816
814 l = l; 817 l = l;
815 vha = fcport->vha; 818 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 819 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 820 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 821 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 822 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 823 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 853}
851 854
852int 855int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 856qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 857{
855 int rval, rval2; 858 int rval, rval2;
856 mbx_cmd_t mc; 859 mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 865 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 866
864 vha = fcport->vha; 867 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 868 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 869 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 870 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 871 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 872 if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
931 mcp->mb[9] = vha->vp_idx; 934 mcp->mb[9] = vha->vp_idx;
932 mcp->out_mb = MBX_9|MBX_0; 935 mcp->out_mb = MBX_9|MBX_0;
933 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 936 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
937 if (IS_QLA81XX(vha->hw))
938 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
934 mcp->tov = MBX_TOV_SECONDS; 939 mcp->tov = MBX_TOV_SECONDS;
935 mcp->flags = 0; 940 mcp->flags = 0;
936 rval = qla2x00_mailbox_command(vha, mcp); 941 rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 957 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
953 vha->host_no, rval)); 958 vha->host_no, rval));
954 } else { 959 } else {
955 /*EMPTY*/
956 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 960 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
957 vha->host_no)); 961 vha->host_no));
962
963 if (IS_QLA81XX(vha->hw)) {
964 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
965 vha->fcoe_fcf_idx = mcp->mb[10];
966 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
967 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
968 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
969 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
970 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
971 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
972 }
958 } 973 }
959 974
960 return rval; 975 return rval;
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1252 1267
1253 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1254 mcp->out_mb = MBX_0; 1269 mcp->out_mb = MBX_0;
1255 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1270 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1256 mcp->tov = MBX_TOV_SECONDS; 1271 mcp->tov = MBX_TOV_SECONDS;
1257 mcp->flags = 0; 1272 mcp->flags = 0;
1258 rval = qla2x00_mailbox_command(vha, mcp); 1273 rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1261 states[0] = mcp->mb[1]; 1276 states[0] = mcp->mb[1];
1262 states[1] = mcp->mb[2]; 1277 states[1] = mcp->mb[2];
1263 states[2] = mcp->mb[3]; 1278 states[2] = mcp->mb[3];
1279 states[3] = mcp->mb[4];
1280 states[4] = mcp->mb[5];
1264 1281
1265 if (rval != QLA_SUCCESS) { 1282 if (rval != QLA_SUCCESS) {
1266 /*EMPTY*/ 1283 /*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1480 dma_addr_t lg_dma; 1497 dma_addr_t lg_dma;
1481 uint32_t iop[2]; 1498 uint32_t iop[2];
1482 struct qla_hw_data *ha = vha->hw; 1499 struct qla_hw_data *ha = vha->hw;
1500 struct req_que *req;
1501 struct rsp_que *rsp;
1483 1502
1484 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1503 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1485 1504
1505 if (ql2xmultique_tag)
1506 req = ha->req_q_map[0];
1507 else
1508 req = vha->req;
1509 rsp = req->rsp;
1510
1486 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1511 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1487 if (lg == NULL) { 1512 if (lg == NULL) {
1488 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1513 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1493 1518
1494 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1519 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1495 lg->entry_count = 1; 1520 lg->entry_count = 1;
1521 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1496 lg->nport_handle = cpu_to_le16(loop_id); 1522 lg->nport_handle = cpu_to_le16(loop_id);
1497 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1523 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1498 if (opt & BIT_0) 1524 if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1741 struct logio_entry_24xx *lg; 1767 struct logio_entry_24xx *lg;
1742 dma_addr_t lg_dma; 1768 dma_addr_t lg_dma;
1743 struct qla_hw_data *ha = vha->hw; 1769 struct qla_hw_data *ha = vha->hw;
1770 struct req_que *req;
1771 struct rsp_que *rsp;
1744 1772
1745 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1773 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1746 1774
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1752 } 1780 }
1753 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1781 memset(lg, 0, sizeof(struct logio_entry_24xx));
1754 1782
1783 if (ql2xmaxqueues > 1)
1784 req = ha->req_q_map[0];
1785 else
1786 req = vha->req;
1787 rsp = req->rsp;
1755 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1788 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1756 lg->entry_count = 1; 1789 lg->entry_count = 1;
1790 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1757 lg->nport_handle = cpu_to_le16(loop_id); 1791 lg->nport_handle = cpu_to_le16(loop_id);
1758 lg->control_flags = 1792 lg->control_flags =
1759 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1793 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1864 mbx_cmd_t mc; 1898 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc; 1899 mbx_cmd_t *mcp = &mc;
1866 1900
1867 if (IS_QLA81XX(vha->hw))
1868 return QLA_SUCCESS;
1869
1870 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1901 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1871 vha->host_no)); 1902 vha->host_no));
1872 1903
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2195} 2226}
2196 2227
2197int 2228int
2198qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2229qla24xx_abort_command(srb_t *sp)
2199{ 2230{
2200 int rval; 2231 int rval;
2201 fc_port_t *fcport;
2202 unsigned long flags = 0; 2232 unsigned long flags = 0;
2203 2233
2204 struct abort_entry_24xx *abt; 2234 struct abort_entry_24xx *abt;
2205 dma_addr_t abt_dma; 2235 dma_addr_t abt_dma;
2206 uint32_t handle; 2236 uint32_t handle;
2237 fc_port_t *fcport = sp->fcport;
2238 struct scsi_qla_host *vha = fcport->vha;
2207 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = vha->req;
2208 2241
2209 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2242 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2210 2243
2211 fcport = sp->fcport;
2212
2213 spin_lock_irqsave(&ha->hardware_lock, flags); 2244 spin_lock_irqsave(&ha->hardware_lock, flags);
2214 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2245 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2215 if (req->outstanding_cmds[handle] == sp) 2246 if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2231 2262
2232 abt->entry_type = ABORT_IOCB_TYPE; 2263 abt->entry_type = ABORT_IOCB_TYPE;
2233 abt->entry_count = 1; 2264 abt->entry_count = 1;
2265 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2234 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2266 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2235 abt->handle_to_abort = handle; 2267 abt->handle_to_abort = handle;
2236 abt->port_id[0] = fcport->d_id.b.al_pa; 2268 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
2272 2304
2273static int 2305static int
2274__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2306__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2275 unsigned int l) 2307 unsigned int l, int tag)
2276{ 2308{
2277 int rval, rval2; 2309 int rval, rval2;
2278 struct tsk_mgmt_cmd *tsk; 2310 struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2286 2318
2287 vha = fcport->vha; 2319 vha = fcport->vha;
2288 ha = vha->hw; 2320 ha = vha->hw;
2289 req = ha->req_q_map[0]; 2321 req = vha->req;
2290 rsp = ha->rsp_q_map[0]; 2322 if (ql2xmultique_tag)
2323 rsp = ha->rsp_q_map[tag + 1];
2324 else
2325 rsp = req->rsp;
2291 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2326 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2292 if (tsk == NULL) { 2327 if (tsk == NULL) {
2293 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2328 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2298 2333
2299 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2334 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2300 tsk->p.tsk.entry_count = 1; 2335 tsk->p.tsk.entry_count = 1;
2336 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2301 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2337 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2302 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2338 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2303 tsk->p.tsk.control_flags = cpu_to_le32(type); 2339 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2344} 2380}
2345 2381
2346int 2382int
2347qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2383qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2348{ 2384{
2349 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2385 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2350} 2386}
2351 2387
2352int 2388int
2353qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2389qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2354{ 2390{
2355 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2391 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2356} 2392}
2357 2393
2358int 2394int
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2446 if (rval != QLA_SUCCESS) { 2482 if (rval != QLA_SUCCESS) {
2447 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2483 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2448 vha->host_no, rval)); 2484 vha->host_no, rval));
2485 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2486 rval = QLA_INVALID_COMMAND;
2449 } else { 2487 } else {
2450 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2488 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2451 } 2489 }
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2717 if (vp_idx == 0) 2755 if (vp_idx == 0)
2718 return; 2756 return;
2719 2757
2720 if (MSB(stat) == 1) 2758 if (MSB(stat) == 1) {
2759 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
2760 "VP[%d].\n", vha->host_no, vp_idx));
2721 return; 2761 return;
2762 }
2722 2763
2723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2764 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
2724 if (vp_idx == vp->vp_idx) 2765 if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3141 WRT_REG_DWORD(&reg->req_q_in, 0); 3182 WRT_REG_DWORD(&reg->req_q_in, 0);
3142 WRT_REG_DWORD(&reg->req_q_out, 0); 3183 WRT_REG_DWORD(&reg->req_q_out, 0);
3143 } 3184 }
3185 req->req_q_in = &reg->req_q_in;
3186 req->req_q_out = &reg->req_q_out;
3144 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3187 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3145 3188
3146 rval = qla2x00_mailbox_command(vha, mcp); 3189 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3167 mcp->mb[6] = MSW(MSD(rsp->dma)); 3210 mcp->mb[6] = MSW(MSD(rsp->dma));
3168 mcp->mb[7] = LSW(MSD(rsp->dma)); 3211 mcp->mb[7] = LSW(MSD(rsp->dma));
3169 mcp->mb[5] = rsp->length; 3212 mcp->mb[5] = rsp->length;
3170 mcp->mb[11] = rsp->vp_idx;
3171 mcp->mb[14] = rsp->msix->entry; 3213 mcp->mb[14] = rsp->msix->entry;
3172 mcp->mb[13] = rsp->rid; 3214 mcp->mb[13] = rsp->rid;
3173 3215
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3179 mcp->mb[8] = 0; 3221 mcp->mb[8] = 0;
3180 /* que out ptr index */ 3222 /* que out ptr index */
3181 mcp->mb[9] = 0; 3223 mcp->mb[9] = 0;
3182 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3224 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3183 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3225 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3184 mcp->in_mb = MBX_0; 3226 mcp->in_mb = MBX_0;
3185 mcp->flags = MBX_DMA_OUT; 3227 mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3384 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3426 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
3385 vha->host_no, rval, mcp->mb[0])); 3427 vha->host_no, rval, mcp->mb[0]));
3386 } else { 3428 } else {
3387 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3429 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3388 } 3430 }
3389 3431
3390 return rval; 3432 return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3428 3470
3429 return rval; 3471 return rval;
3430} 3472}
3473
3474int
3475qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3476 uint16_t size_in_bytes, uint16_t *actual_size)
3477{
3478 int rval;
3479 mbx_cmd_t mc;
3480 mbx_cmd_t *mcp = &mc;
3481
3482 if (!IS_QLA81XX(vha->hw))
3483 return QLA_FUNCTION_FAILED;
3484
3485 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3486
3487 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3488 mcp->mb[2] = MSW(stats_dma);
3489 mcp->mb[3] = LSW(stats_dma);
3490 mcp->mb[6] = MSW(MSD(stats_dma));
3491 mcp->mb[7] = LSW(MSD(stats_dma));
3492 mcp->mb[8] = size_in_bytes >> 2;
3493 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3494 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3498
3499 if (rval != QLA_SUCCESS) {
3500 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3501 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3502 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3503 } else {
3504 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3505
3506 *actual_size = mcp->mb[2] << 2;
3507 }
3508
3509 return rval;
3510}
3511
3512int
3513qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3514 uint16_t size)
3515{
3516 int rval;
3517 mbx_cmd_t mc;
3518 mbx_cmd_t *mcp = &mc;
3519
3520 if (!IS_QLA81XX(vha->hw))
3521 return QLA_FUNCTION_FAILED;
3522
3523 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3524
3525 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3526 mcp->mb[1] = 0;
3527 mcp->mb[2] = MSW(tlv_dma);
3528 mcp->mb[3] = LSW(tlv_dma);
3529 mcp->mb[6] = MSW(MSD(tlv_dma));
3530 mcp->mb[7] = LSW(MSD(tlv_dma));
3531 mcp->mb[8] = size;
3532 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3533 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3534 mcp->tov = MBX_TOV_SECONDS;
3535 mcp->flags = 0;
3536 rval = qla2x00_mailbox_command(vha, mcp);
3537
3538 if (rval != QLA_SUCCESS) {
3539 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3540 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3541 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3542 } else {
3543 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3544 }
3545
3546 return rval;
3547}
3548
3549int
3550qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3551{
3552 int rval;
3553 mbx_cmd_t mc;
3554 mbx_cmd_t *mcp = &mc;
3555
3556 if (!IS_FWI2_CAPABLE(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3558
3559 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3560
3561 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3562 mcp->mb[1] = LSW(risc_addr);
3563 mcp->mb[8] = MSW(risc_addr);
3564 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3565 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3566 mcp->tov = 30;
3567 mcp->flags = 0;
3568 rval = qla2x00_mailbox_command(vha, mcp);
3569 if (rval != QLA_SUCCESS) {
3570 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3571 vha->host_no, rval, mcp->mb[0]));
3572 } else {
3573 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3574 *data = mcp->mb[3] << 16 | mcp->mb[2];
3575 }
3576
3577 return rval;
3578}
3579
3580int
3581qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3582{
3583 int rval;
3584 mbx_cmd_t mc;
3585 mbx_cmd_t *mcp = &mc;
3586
3587 if (!IS_FWI2_CAPABLE(vha->hw))
3588 return QLA_FUNCTION_FAILED;
3589
3590 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3591
3592 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3593 mcp->mb[1] = LSW(risc_addr);
3594 mcp->mb[2] = LSW(data);
3595 mcp->mb[3] = MSW(data);
3596 mcp->mb[8] = MSW(risc_addr);
3597 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3598 mcp->in_mb = MBX_0;
3599 mcp->tov = 30;
3600 mcp->flags = 0;
3601 rval = qla2x00_mailbox_command(vha, mcp);
3602 if (rval != QLA_SUCCESS) {
3603 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3604 vha->host_no, rval, mcp->mb[0]));
3605 } else {
3606 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3607 }
3608
3609 return rval;
3610}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e3008..650bcef08f2a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
@@ -654,10 +633,19 @@ que_failed:
654 return 0; 633 return 0;
655} 634}
656 635
636static void qla_do_work(struct work_struct *work)
637{
638 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
639 struct scsi_qla_host *vha;
640
641 vha = qla25xx_get_host(rsp);
642 qla24xx_process_response_queue(vha, rsp);
643}
644
657/* create response queue */ 645/* create response queue */
658int 646int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 647qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 648 uint8_t vp_idx, uint16_t rid, int req)
661{ 649{
662 int ret = 0; 650 int ret = 0;
663 struct rsp_que *rsp = NULL; 651 struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 660 goto que_failed;
673 } 661 }
674 662
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 663 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 664 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 665 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 666 &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 671 }
684 672
685 mutex_lock(&ha->vport_lock); 673 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 674 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 675 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 676 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 677 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 678 "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 696 if (LSB(rsp->rid))
709 options |= BIT_5; 697 options |= BIT_5;
710 rsp->options = options; 698 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 699 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 700 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 701 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 714 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 715 goto que_failed;
730 } 716 }
717 if (req >= 0)
718 rsp->req = ha->req_q_map[req];
719 else
720 rsp->req = NULL;
731 721
732 qla2x00_init_response_q_entries(rsp); 722 qla2x00_init_response_q_entries(rsp);
733 723 if (rsp->hw->wq)
724 INIT_WORK(&rsp->q_work, qla_do_work);
734 return rsp->id; 725 return rsp->id;
735 726
736que_failed: 727que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 735 uint16_t options = 0;
745 uint8_t ret = 0; 736 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 737 struct qla_hw_data *ha = vha->hw;
738 struct rsp_que *rsp;
747 739
748 options |= BIT_1; 740 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 741 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 742 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 743 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 744 return ret;
753 } else 745 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 746 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
747 rsp = ha->rsp_q_map[ret];
755 748
756 options = 0; 749 options = 0;
757 if (qos & BIT_7) 750 if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 752 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 753 qos & ~BIT_7);
761 if (ret) { 754 if (ret) {
762 vha->req_ques[0] = ret; 755 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 756 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 757 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 758 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
759 rsp->req = ha->req_q_map[ret];
766 760
767 return ret; 761 return ret;
768} 762}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d0..dcf011679c8b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 77MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 78 "Maximum queue depth to report for target devices.");
79 79
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
80int ql2xqfullrampup = 120; 88int ql2xqfullrampup = 120;
81module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfullrampup, 90MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
96 "Enables MQ settings " 104 "Enables MQ settings "
97 "Default is 1 for single queue. Set it to number \ 105 "Default is 1 for single queue. Set it to number \
98 of queues in MQ mode."); 106 of queues in MQ mode.");
107
108int ql2xmultique_tag;
109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xmultique_tag,
111 "Enables CPU affinity settings for the driver "
112 "Default is 0 for no affinity of request and response IO. "
113 "Set it to 1 to turn on the cpu affinity.");
114
115int ql2xfwloadbin;
116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
117MODULE_PARM_DESC(ql2xfwloadbin,
118 "Option to specify location from which to load ISP firmware:\n"
119 " 2 -- load firmware via the request_firmware() (hotplug)\n"
120 " interface.\n"
121 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n");
123
99/* 124/*
100 * SCSI host template entry points 125 * SCSI host template entry points
101 */ 126 */
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 212/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 214{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 215 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 216 GFP_KERNEL);
192 if (!ha->req_q_map) { 217 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 218 qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 220 goto fail_req_map;
196 } 221 }
197 222
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 223 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 224 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 225 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 226 qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
213 return -ENOMEM; 238 return -ENOMEM;
214} 239}
215 240
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 242{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 243 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 244 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 245 (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 249 req = NULL;
233} 250}
234 251
252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
253{
254 if (rsp && rsp->ring)
255 dma_free_coherent(&ha->pdev->dev,
256 (rsp->length + 1) * sizeof(response_t),
257 rsp->ring, rsp->dma);
258
259 kfree(rsp);
260 rsp = NULL;
261}
262
235static void qla2x00_free_queues(struct qla_hw_data *ha) 263static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 264{
237 struct req_que *req; 265 struct req_que *req;
238 struct rsp_que *rsp; 266 struct rsp_que *rsp;
239 int cnt; 267 int cnt;
240 268
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 269 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 270 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 271 qla2x00_free_req_que(ha, req);
272 }
273 kfree(ha->req_q_map);
274 ha->req_q_map = NULL;
275
276 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
277 rsp = ha->rsp_q_map[cnt];
278 qla2x00_free_rsp_que(ha, rsp);
245 } 279 }
246 kfree(ha->rsp_q_map); 280 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL; 281 ha->rsp_q_map = NULL;
282}
248 283
249 kfree(ha->req_q_map); 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
250 ha->req_q_map = NULL; 285{
286 uint16_t options = 0;
287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw;
289
290 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */
294 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
296 QLA_DEFAULT_QUE_QOS);
297 if (!req) {
298 qla_printk(KERN_WARNING, ha,
299 "Can't create request queue\n");
300 goto fail;
301 }
302 vha->req = ha->req_q_map[req];
303 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
305 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
306 if (!ret) {
307 qla_printk(KERN_WARNING, ha,
308 "Response Queue create failed\n");
309 goto fail2;
310 }
311 }
312 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n",
315 ha->max_rsp_queues, ha->max_req_queues));
316 }
317 return 0;
318fail2:
319 qla25xx_delete_queues(vha);
320fail:
321 ha->mqenable = 0;
322 return 1;
251} 323}
252 324
253static char * 325static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
387 459
388 sp->fcport = fcport; 460 sp->fcport = fcport;
389 sp->cmd = cmd; 461 sp->cmd = cmd;
390 sp->que = ha->req_q_map[0];
391 sp->flags = 0; 462 sp->flags = 0;
392 CMD_SP(cmd) = (void *)sp; 463 CMD_SP(cmd) = (void *)sp;
393 cmd->scsi_done = done; 464 cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 683void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 684qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 685{
615 int cnt, que, id; 686 int cnt;
616 unsigned long flags; 687 unsigned long flags;
617 srb_t *sp; 688 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 689 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 691 struct req_que *req;
621 692
622 spin_lock_irqsave(&ha->hardware_lock, flags); 693 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 694 req = vha->req;
624 id = vha->req_ques[que]; 695 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 696 sp = req->outstanding_cmds[cnt];
626 if (!req) 697 if (!sp)
698 continue;
699 if (sp->fcport != fcport)
627 continue; 700 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 701
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 703 if (ha->isp_ops->abort_command(sp)) {
704 DEBUG2(qla_printk(KERN_WARNING, ha,
705 "Abort failed -- %lx\n",
706 sp->cmd->serial_number));
707 } else {
708 if (qla2x00_eh_wait_on_command(sp->cmd) !=
709 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 710 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 711 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 712 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 713 }
714 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 715 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 717}
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
693 unsigned long flags; 759 unsigned long flags;
694 int wait = 0; 760 int wait = 0;
695 struct qla_hw_data *ha = vha->hw; 761 struct qla_hw_data *ha = vha->hw;
696 struct req_que *req; 762 struct req_que *req = vha->req;
697 srb_t *spt; 763 srb_t *spt;
698 764
699 qla2x00_block_error_handler(cmd); 765 qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 spt = (srb_t *) CMD_SP(cmd); 775 spt = (srb_t *) CMD_SP(cmd);
710 if (!spt) 776 if (!spt)
711 return SUCCESS; 777 return SUCCESS;
712 req = spt->que;
713 778
714 /* Check active list for command command. */ 779 /* Check active list for command command. */
715 spin_lock_irqsave(&ha->hardware_lock, flags); 780 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 791 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 792
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 794 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 795 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 796 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 797 ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
777 return status; 842 return status;
778 843
779 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
780 req = sp->que; 845 req = vha->req;
781 for (cnt = 1; status == QLA_SUCCESS && 846 for (cnt = 1; status == QLA_SUCCESS &&
782 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 847 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
783 sp = req->outstanding_cmds[cnt]; 848 sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
820 885
821static int 886static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 887__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 888 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 889{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 890 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 891 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 906 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 907 goto eh_reset_failed;
843 err = 2; 908 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 909 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
910 != QLA_SUCCESS)
845 goto eh_reset_failed; 911 goto eh_reset_failed;
846 err = 3; 912 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 913 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
996 if (qla2x00_vp_abort_isp(vha)) 1062 if (qla2x00_vp_abort_isp(vha))
997 goto eh_host_reset_lock; 1063 goto eh_host_reset_lock;
998 } else { 1064 } else {
1065 if (ha->wq)
1066 flush_workqueue(ha->wq);
1067
999 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1068 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1000 if (qla2x00_abort_isp(base_vha)) { 1069 if (qla2x00_abort_isp(base_vha)) {
1001 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1070 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1037 struct fc_port *fcport; 1106 struct fc_port *fcport;
1038 struct qla_hw_data *ha = vha->hw; 1107 struct qla_hw_data *ha = vha->hw;
1039 1108
1040 if (ha->flags.enable_lip_full_login && !vha->vp_idx) { 1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
1110 !IS_QLA81XX(ha)) {
1041 ret = qla2x00_full_login_lip(vha); 1111 ret = qla2x00_full_login_lip(vha);
1042 if (ret != QLA_SUCCESS) { 1112 if (ret != QLA_SUCCESS) {
1043 DEBUG2_3(printk("%s(%ld): failed: " 1113 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1064 if (fcport->port_type != FCT_TARGET) 1134 if (fcport->port_type != FCT_TARGET)
1065 continue; 1135 continue;
1066 1136
1067 ret = ha->isp_ops->target_reset(fcport, 0); 1137 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1068 if (ret != QLA_SUCCESS) { 1138 if (ret != QLA_SUCCESS) {
1069 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1139 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1070 "target_reset=%d d_id=%x.\n", __func__, 1140 "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1088 struct req_que *req; 1158 struct req_que *req;
1089 1159
1090 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1091 for (que = 0; que < ha->max_queues; que++) { 1161 for (que = 0; que < ha->max_req_queues; que++) {
1092 req = ha->req_q_map[que]; 1162 req = ha->req_q_map[que];
1093 if (!req) 1163 if (!req)
1094 continue; 1164 continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1123 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1124 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1125 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1126 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1196 struct req_que *req = vha->req;
1127 1197
1128 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1129 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1511 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1581 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1512 break; 1582 break;
1513 } 1583 }
1584
1585 /* Get adapter physical port no from interrupt pin register. */
1586 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1587 if (ha->port_no & 1)
1588 ha->flags.port0 = 1;
1589 else
1590 ha->flags.port0 = 0;
1514} 1591}
1515 1592
1516static int 1593static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1518{ 1595{
1519 resource_size_t pio; 1596 resource_size_t pio;
1520 uint16_t msix; 1597 uint16_t msix;
1598 int cpus;
1521 1599
1522 if (pci_request_selected_regions(ha->pdev, ha->bars, 1600 if (pci_request_selected_regions(ha->pdev, ha->bars,
1523 QLA2XXX_DRIVER_NAME)) { 1601 QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
1571 } 1649 }
1572 1650
1573 /* Determine queue resources */ 1651 /* Determine queue resources */
1574 ha->max_queues = 1; 1652 ha->max_req_queues = ha->max_rsp_queues = 1;
1575 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1653 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1654 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1576 goto mqiobase_exit; 1655 goto mqiobase_exit;
1577 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1656 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1578 pci_resource_len(ha->pdev, 3)); 1657 pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
1582 ha->msix_count = msix; 1661 ha->msix_count = msix;
1583 /* Max queues are bounded by available msix vectors */ 1662 /* Max queues are bounded by available msix vectors */
1584 /* queue 0 uses two msix vectors */ 1663 /* queue 0 uses two msix vectors */
1585 if (ha->msix_count - 1 < ql2xmaxqueues) 1664 if (ql2xmultique_tag) {
1586 ha->max_queues = ha->msix_count - 1; 1665 cpus = num_online_cpus();
1587 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1666 ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
1588 ha->max_queues = QLA_MQ_SIZE; 1667 (cpus + 1) : (ha->msix_count - 1);
1589 else 1668 ha->max_req_queues = 2;
1590 ha->max_queues = ql2xmaxqueues; 1669 } else if (ql2xmaxqueues > 1) {
1670 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1671 QLA_MQ_SIZE : ql2xmaxqueues;
1672 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1673 " of request queues:%d\n", ha->max_req_queues));
1674 }
1591 qla_printk(KERN_INFO, ha, 1675 qla_printk(KERN_INFO, ha,
1592 "MSI-X vector count: %d\n", msix); 1676 "MSI-X vector count: %d\n", msix);
1593 } 1677 } else
1678 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1594 1679
1595mqiobase_exit: 1680mqiobase_exit:
1596 ha->msix_count = ha->max_queues + 1; 1681 ha->msix_count = ha->max_rsp_queues + 1;
1597 return (0); 1682 return (0);
1598 1683
1599iospace_error_exit: 1684iospace_error_exit:
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1605{ 1690{
1606 scsi_qla_host_t *vha = shost_priv(shost); 1691 scsi_qla_host_t *vha = shost_priv(shost);
1607 1692
1693 if (vha->hw->flags.running_gold_fw)
1694 return;
1695
1608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1696 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1697 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1610 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1698 set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1768 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 1856 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1769 ha->gid_list_info_size = 8; 1857 ha->gid_list_info_size = 8;
1770 ha->optrom_size = OPTROM_SIZE_81XX; 1858 ha->optrom_size = OPTROM_SIZE_81XX;
1859 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1771 ha->isp_ops = &qla81xx_isp_ops; 1860 ha->isp_ops = &qla81xx_isp_ops;
1772 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 1861 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1773 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 1862 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1803 1892
1804 ret = -ENOMEM; 1893 ret = -ENOMEM;
1805 qla2x00_mem_free(ha); 1894 qla2x00_mem_free(ha);
1806 qla2x00_free_que(ha, req, rsp); 1895 qla2x00_free_req_que(ha, req);
1896 qla2x00_free_rsp_que(ha, rsp);
1807 goto probe_hw_failed; 1897 goto probe_hw_failed;
1808 } 1898 }
1809 1899
1810 pci_set_drvdata(pdev, base_vha); 1900 pci_set_drvdata(pdev, base_vha);
1811 1901
1812 host = base_vha->host; 1902 host = base_vha->host;
1813 base_vha->req_ques[0] = req->id; 1903 base_vha->req = req;
1814 host->can_queue = req->length + 128; 1904 host->can_queue = req->length + 128;
1815 if (IS_QLA2XXX_MIDTYPE(ha)) 1905 if (IS_QLA2XXX_MIDTYPE(ha))
1816 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1906 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1841 } 1931 }
1842 ha->rsp_q_map[0] = rsp; 1932 ha->rsp_q_map[0] = rsp;
1843 ha->req_q_map[0] = req; 1933 ha->req_q_map[0] = req;
1844 1934 rsp->req = req;
1935 req->rsp = rsp;
1936 set_bit(0, ha->req_qid_map);
1937 set_bit(0, ha->rsp_qid_map);
1845 /* FWI2-capable only. */ 1938 /* FWI2-capable only. */
1846 req->req_q_in = &ha->iobase->isp24.req_q_in; 1939 req->req_q_in = &ha->iobase->isp24.req_q_in;
1847 req->req_q_out = &ha->iobase->isp24.req_q_out; 1940 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1866 goto probe_failed; 1959 goto probe_failed;
1867 } 1960 }
1868 1961
1962 if (ha->mqenable)
1963 if (qla25xx_setup_mode(base_vha))
1964 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single"
1966 " queue mode\n");
1967
1968 if (ha->flags.running_gold_fw)
1969 goto skip_dpc;
1970
1869 /* 1971 /*
1870 * Startup the kernel thread for this host adapter 1972 * Startup the kernel thread for this host adapter
1871 */ 1973 */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1878 goto probe_failed; 1980 goto probe_failed;
1879 } 1981 }
1880 1982
1983skip_dpc:
1881 list_add_tail(&base_vha->list, &ha->vp_list); 1984 list_add_tail(&base_vha->list, &ha->vp_list);
1882 base_vha->host->irq = ha->pdev->irq; 1985 base_vha->host->irq = ha->pdev->irq;
1883 1986
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 return 0; 2020 return 0;
1918 2021
1919probe_init_failed: 2022probe_init_failed:
1920 qla2x00_free_que(ha, req, rsp); 2023 qla2x00_free_req_que(ha, req);
1921 ha->max_queues = 0; 2024 qla2x00_free_rsp_que(ha, rsp);
2025 ha->max_req_queues = ha->max_rsp_queues = 0;
1922 2026
1923probe_failed: 2027probe_failed:
1924 if (base_vha->timer_active) 2028 if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1976 2080
1977 base_vha->flags.online = 0; 2081 base_vha->flags.online = 0;
1978 2082
2083 /* Flush the work queue and remove it */
2084 if (ha->wq) {
2085 flush_workqueue(ha->wq);
2086 destroy_workqueue(ha->wq);
2087 ha->wq = NULL;
2088 }
2089
1979 /* Kill the kernel thread for this host */ 2090 /* Kill the kernel thread for this host */
1980 if (ha->dpc_thread) { 2091 if (ha->dpc_thread) {
1981 struct task_struct *t = ha->dpc_thread; 2092 struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2017{ 2128{
2018 struct qla_hw_data *ha = vha->hw; 2129 struct qla_hw_data *ha = vha->hw;
2019 2130
2131 qla25xx_delete_queues(vha);
2132
2020 if (ha->flags.fce_enabled) 2133 if (ha->flags.fce_enabled)
2021 qla2x00_disable_fce_trace(vha, NULL, NULL); 2134 qla2x00_disable_fce_trace(vha, NULL, NULL);
2022 2135
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2329 vfree(ha->fw_dump); 2442 vfree(ha->fw_dump);
2330 } 2443 }
2331 2444
2445 if (ha->dcbx_tlv)
2446 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2447 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2448
2449 if (ha->xgmac_data)
2450 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2451 ha->xgmac_data, ha->xgmac_data_dma);
2452
2332 if (ha->sns_cmd) 2453 if (ha->sns_cmd)
2333 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2454 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2334 ha->sns_cmd, ha->sns_cmd_dma); 2455 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2412 INIT_LIST_HEAD(&vha->work_list); 2533 INIT_LIST_HEAD(&vha->work_list);
2413 INIT_LIST_HEAD(&vha->list); 2534 INIT_LIST_HEAD(&vha->list);
2414 2535
2536 spin_lock_init(&vha->work_lock);
2537
2415 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 2538 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2416 return vha; 2539 return vha;
2417 2540
@@ -2420,13 +2543,11 @@ fail:
2420} 2543}
2421 2544
2422static struct qla_work_evt * 2545static struct qla_work_evt *
2423qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, 2546qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2424 int locked)
2425{ 2547{
2426 struct qla_work_evt *e; 2548 struct qla_work_evt *e;
2427 2549
2428 e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: 2550 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2429 GFP_KERNEL);
2430 if (!e) 2551 if (!e)
2431 return NULL; 2552 return NULL;
2432 2553
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2437} 2558}
2438 2559
2439static int 2560static int
2440qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) 2561qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2441{ 2562{
2442 unsigned long uninitialized_var(flags); 2563 unsigned long flags;
2443 struct qla_hw_data *ha = vha->hw;
2444 2564
2445 if (!locked) 2565 spin_lock_irqsave(&vha->work_lock, flags);
2446 spin_lock_irqsave(&ha->hardware_lock, flags);
2447 list_add_tail(&e->list, &vha->work_list); 2566 list_add_tail(&e->list, &vha->work_list);
2567 spin_unlock_irqrestore(&vha->work_lock, flags);
2448 qla2xxx_wake_dpc(vha); 2568 qla2xxx_wake_dpc(vha);
2449 if (!locked) 2569
2450 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451 return QLA_SUCCESS; 2570 return QLA_SUCCESS;
2452} 2571}
2453 2572
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2457{ 2576{
2458 struct qla_work_evt *e; 2577 struct qla_work_evt *e;
2459 2578
2460 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); 2579 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2461 if (!e) 2580 if (!e)
2462 return QLA_FUNCTION_FAILED; 2581 return QLA_FUNCTION_FAILED;
2463 2582
2464 e->u.aen.code = code; 2583 e->u.aen.code = code;
2465 e->u.aen.data = data; 2584 e->u.aen.data = data;
2466 return qla2x00_post_work(vha, e, 1); 2585 return qla2x00_post_work(vha, e);
2467} 2586}
2468 2587
2469int 2588int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2471{ 2590{
2472 struct qla_work_evt *e; 2591 struct qla_work_evt *e;
2473 2592
2474 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); 2593 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2475 if (!e) 2594 if (!e)
2476 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2477 2596
2478 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 2597 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2479 return qla2x00_post_work(vha, e, 1); 2598 return qla2x00_post_work(vha, e);
2480} 2599}
2481 2600
2482static void 2601static void
2483qla2x00_do_work(struct scsi_qla_host *vha) 2602qla2x00_do_work(struct scsi_qla_host *vha)
2484{ 2603{
2485 struct qla_work_evt *e; 2604 struct qla_work_evt *e, *tmp;
2486 struct qla_hw_data *ha = vha->hw; 2605 unsigned long flags;
2606 LIST_HEAD(work);
2487 2607
2488 spin_lock_irq(&ha->hardware_lock); 2608 spin_lock_irqsave(&vha->work_lock, flags);
2489 while (!list_empty(&vha->work_list)) { 2609 list_splice_init(&vha->work_list, &work);
2490 e = list_entry(vha->work_list.next, struct qla_work_evt, list); 2610 spin_unlock_irqrestore(&vha->work_lock, flags);
2611
2612 list_for_each_entry_safe(e, tmp, &work, list) {
2491 list_del_init(&e->list); 2613 list_del_init(&e->list);
2492 spin_unlock_irq(&ha->hardware_lock);
2493 2614
2494 switch (e->type) { 2615 switch (e->type) {
2495 case QLA_EVT_AEN: 2616 case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2502 } 2623 }
2503 if (e->flags & QLA_EVT_FLAG_FREE) 2624 if (e->flags & QLA_EVT_FLAG_FREE)
2504 kfree(e); 2625 kfree(e);
2505 spin_lock_irq(&ha->hardware_lock);
2506 } 2626 }
2507 spin_unlock_irq(&ha->hardware_lock);
2508} 2627}
2628
2509/* Relogins all the fcports of a vport 2629/* Relogins all the fcports of a vport
2510 * Context: dpc thread 2630 * Context: dpc thread
2511 */ 2631 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd2..6260505dceb5 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
219 wait_cnt = NVR_WAIT_CNT; 219 wait_cnt = NVR_WAIT_CNT;
220 do { 220 do {
221 if (!--wait_cnt) { 221 if (!--wait_cnt) {
222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(qla_printk(KERN_WARNING, ha,
223 __func__, vha->host_no)); 223 "NVRAM didn't go ready...\n"));
224 break; 224 break;
225 } 225 }
226 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
349 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
350 do { 350 do {
351 if (!--wait_cnt) { 351 if (!--wait_cnt) {
352 DEBUG9_10(qla_printk( 352 DEBUG9_10(qla_printk(KERN_WARNING, ha,
353 "NVRAM didn't go ready...\n")); 353 "NVRAM didn't go ready...\n"));
354 break; 354 break;
355 } 355 }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
408 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
409 do { 409 do {
410 if (!--wait_cnt) { 410 if (!--wait_cnt) {
411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); 411 DEBUG9_10(qla_printk(KERN_WARNING, ha,
412 "NVRAM didn't go ready...\n"));
412 break; 413 break;
413 } 414 }
414 NVRAM_DELAY(); 415 NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
701 break; 702 break;
702 case FLT_REG_VPD_0: 703 case FLT_REG_VPD_0:
703 ha->flt_region_vpd_nvram = start; 704 ha->flt_region_vpd_nvram = start;
704 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 705 if (ha->flags.port0)
705 ha->flt_region_vpd = start; 706 ha->flt_region_vpd = start;
706 break; 707 break;
707 case FLT_REG_VPD_1: 708 case FLT_REG_VPD_1:
708 if (PCI_FUNC(ha->pdev->devfn) & 1) 709 if (!ha->flags.port0)
709 ha->flt_region_vpd = start; 710 ha->flt_region_vpd = start;
710 break; 711 break;
711 case FLT_REG_NVRAM_0: 712 case FLT_REG_NVRAM_0:
712 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 713 if (ha->flags.port0)
713 ha->flt_region_nvram = start; 714 ha->flt_region_nvram = start;
714 break; 715 break;
715 case FLT_REG_NVRAM_1: 716 case FLT_REG_NVRAM_1:
716 if (PCI_FUNC(ha->pdev->devfn) & 1) 717 if (!ha->flags.port0)
717 ha->flt_region_nvram = start; 718 ha->flt_region_nvram = start;
718 break; 719 break;
719 case FLT_REG_FDT: 720 case FLT_REG_FDT:
720 ha->flt_region_fdt = start; 721 ha->flt_region_fdt = start;
721 break; 722 break;
722 case FLT_REG_NPIV_CONF_0: 723 case FLT_REG_NPIV_CONF_0:
723 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 724 if (ha->flags.port0)
724 ha->flt_region_npiv_conf = start; 725 ha->flt_region_npiv_conf = start;
725 break; 726 break;
726 case FLT_REG_NPIV_CONF_1: 727 case FLT_REG_NPIV_CONF_1:
727 if (PCI_FUNC(ha->pdev->devfn) & 1) 728 if (!ha->flags.port0)
728 ha->flt_region_npiv_conf = start; 729 ha->flt_region_npiv_conf = start;
729 break; 730 break;
731 case FLT_REG_GOLD_FW:
732 ha->flt_region_gold_fw = start;
733 break;
730 } 734 }
731 } 735 }
732 goto done; 736 goto done;
@@ -744,12 +748,12 @@ no_flash_data:
744 ha->flt_region_fw = def_fw[def]; 748 ha->flt_region_fw = def_fw[def];
745 ha->flt_region_boot = def_boot[def]; 749 ha->flt_region_boot = def_boot[def];
746 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 750 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
747 ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 751 ha->flt_region_vpd = ha->flags.port0 ?
748 def_vpd0[def]: def_vpd1[def]; 752 def_vpd0[def]: def_vpd1[def];
749 ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 753 ha->flt_region_nvram = ha->flags.port0 ?
750 def_nvram0[def]: def_nvram1[def]; 754 def_nvram0[def]: def_nvram1[def];
751 ha->flt_region_fdt = def_fdt[def]; 755 ha->flt_region_fdt = def_fdt[def];
752 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 756 ha->flt_region_npiv_conf = ha->flags.port0 ?
753 def_npiv_conf0[def]: def_npiv_conf1[def]; 757 def_npiv_conf0[def]: def_npiv_conf1[def];
754done: 758done:
755 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 759 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
924 struct fc_vport_identifiers vid; 928 struct fc_vport_identifiers vid;
925 struct fc_vport *vport; 929 struct fc_vport *vport;
926 930
931 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
932
927 flags = le16_to_cpu(entry->flags); 933 flags = le16_to_cpu(entry->flags);
928 if (flags == 0xffff) 934 if (flags == 0xffff)
929 continue; 935 continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
937 vid.port_name = wwn_to_u64(entry->port_name); 943 vid.port_name = wwn_to_u64(entry->port_name);
938 vid.node_name = wwn_to_u64(entry->node_name); 944 vid.node_name = wwn_to_u64(entry->node_name);
939 945
940 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
941
942 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
943 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
944 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
945 entry->q_qos, entry->f_qos)); 949 entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
955 } 959 }
956done: 960done:
957 kfree(data); 961 kfree(data);
958 ha->npiv_info = NULL;
959} 962}
960 963
961static int 964static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1079 0xff0000) | ((fdata >> 16) & 0xff)); 1082 0xff0000) | ((fdata >> 16) & 0xff));
1080 ret = qla24xx_erase_sector(vha, fdata); 1083 ret = qla24xx_erase_sector(vha, fdata);
1081 if (ret != QLA_SUCCESS) { 1084 if (ret != QLA_SUCCESS) {
1082 DEBUG9(qla_printk("Unable to erase sector: " 1085 DEBUG9(qla_printk(KERN_WARNING, ha,
1083 "address=%x.\n", faddr)); 1086 "Unable to erase sector: address=%x.\n",
1087 faddr));
1084 break; 1088 break;
1085 } 1089 }
1086 } 1090 }
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1240 ret = qla24xx_write_flash_dword(ha, 1244 ret = qla24xx_write_flash_dword(ha,
1241 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1245 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1242 if (ret != QLA_SUCCESS) { 1246 if (ret != QLA_SUCCESS) {
1243 DEBUG9(qla_printk("Unable to program nvram address=%x " 1247 DEBUG9(qla_printk(KERN_WARNING, ha,
1244 "data=%x.\n", naddr, *dwptr)); 1248 "Unable to program nvram address=%x data=%x.\n",
1249 naddr, *dwptr));
1245 break; 1250 break;
1246 } 1251 }
1247 } 1252 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a343..b63feaf43126 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k1" 10#define QLA2XXX_VERSION "8.03.01-k3"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afba..2de5f3ad640b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1225 * @starget: SCSI target pointer 1225 * @starget: SCSI target pointer
1226 * @lun: SCSI Logical Unit Number 1226 * @lun: SCSI Logical Unit Number
1227 * 1227 *
1228 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1228 * Description: Looks up the scsi_device with the specified @lun for a given
1229 * for a given host. The returned scsi_device has an additional reference that 1229 * @starget. The returned scsi_device has an additional reference that
1230 * needs to be released with scsi_device_put once you're done with it. 1230 * needs to be released with scsi_device_put once you're done with it.
1231 **/ 1231 **/
1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486b..41a21772df12 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
887static sector_t get_sdebug_capacity(void) 887static sector_t get_sdebug_capacity(void)
888{ 888{
889 if (scsi_debug_virtual_gb > 0) 889 if (scsi_debug_virtual_gb > 0)
890 return 2048 * 1024 * scsi_debug_virtual_gb; 890 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
891 else 891 else
892 return sdebug_store_sectors; 892 return sdebug_store_sectors;
893} 893}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be1974..a1689353d7fd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
641/** 641/**
642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory 642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory
643 * @scmd: SCSI command structure to restore 643 * @scmd: SCSI command structure to restore
644 * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd 644 * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd
645 * 645 *
646 * Undo any damage done by above scsi_prep_eh_cmnd(). 646 * Undo any damage done by above scsi_eh_prep_cmnd().
647 */ 647 */
648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
649{ 649{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1451 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
1452 * 1452 *
1453 * Locking: 1453 * Locking:
1454 * We must be called from process context; scsi_allocate_request() 1454 * We must be called from process context.
1455 * may sleep.
1456 * 1455 *
1457 * Notes: 1456 * Notes:
1458 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1457 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1459 * head of the devices request queue, and continue. 1458 * head of the devices request queue, and continue.
1460 *
1461 * Bugs:
1462 * scsi_allocate_request() may sleep waiting for existing requests to
1463 * be processed. However, since we haven't kicked off any request
1464 * processing for this host, this may deadlock.
1465 *
1466 * If scsi_allocate_request() fails for what ever reason, we
1467 * completely forget to lock the door.
1468 */ 1459 */
1469static void scsi_eh_lock_door(struct scsi_device *sdev) 1460static void scsi_eh_lock_door(struct scsi_device *sdev)
1470{ 1461{
1471 struct request *req; 1462 struct request *req;
1472 1463
1464 /*
1465 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1466 * request becomes available
1467 */
1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1468 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1474 if (!req)
1475 return;
1476 1469
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1470 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0; 1471 req->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dd3f9d2b99fd..30f3275e119e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2412,20 +2412,18 @@ int
2412scsi_internal_device_unblock(struct scsi_device *sdev) 2412scsi_internal_device_unblock(struct scsi_device *sdev)
2413{ 2413{
2414 struct request_queue *q = sdev->request_queue; 2414 struct request_queue *q = sdev->request_queue;
2415 int err;
2416 unsigned long flags; 2415 unsigned long flags;
2417 2416
2418 /* 2417 /*
2419 * Try to transition the scsi device to SDEV_RUNNING 2418 * Try to transition the scsi device to SDEV_RUNNING
2420 * and goose the device queue if successful. 2419 * and goose the device queue if successful.
2421 */ 2420 */
2422 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2421 if (sdev->sdev_state == SDEV_BLOCK)
2423 if (err) { 2422 sdev->sdev_state = SDEV_RUNNING;
2424 err = scsi_device_set_state(sdev, SDEV_CREATED); 2423 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2425 2424 sdev->sdev_state = SDEV_CREATED;
2426 if (err) 2425 else
2427 return err; 2426 return -EINVAL;
2428 }
2429 2427
2430 spin_lock_irqsave(q->queue_lock, flags); 2428 spin_lock_irqsave(q->queue_lock, flags);
2431 blk_start_queue(q); 2429 blk_start_queue(q);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e2b50d8f57a8..c44783801402 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
119 119
120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(inq_timeout, 121MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 20. Some devices may need more; most need less.");
124 124
125/* This lock protects only this list */ 125/* This lock protects only this list */
126static DEFINE_SPINLOCK(async_scan_lock); 126static DEFINE_SPINLOCK(async_scan_lock);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a2ce7b6325c..f3e664628d7a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
37#define ISCSI_TRANSPORT_VERSION "2.0-870" 37#define ISCSI_TRANSPORT_VERSION "2.0-870"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid;
41 struct scsi_transport_template t; 40 struct scsi_transport_template t;
42 struct iscsi_transport *iscsi_transport; 41 struct iscsi_transport *iscsi_transport;
43 struct list_head list; 42 struct list_head list;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
938} 937}
939 938
940static int 939static int
941iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 940iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
942{ 941{
943 return netlink_broadcast(nls, skb, 0, 1, gfp); 942 return nlmsg_multicast(nls, skb, 0, group, gfp);
944}
945
946static int
947iscsi_unicast_skb(struct sk_buff *skb, int pid)
948{
949 int rc;
950
951 rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
952 if (rc < 0) {
953 printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
954 return rc;
955 }
956
957 return 0;
958} 943}
959 944
960int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 945int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
980 return -ENOMEM; 965 return -ENOMEM;
981 } 966 }
982 967
983 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 968 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
984 ev = NLMSG_DATA(nlh); 969 ev = NLMSG_DATA(nlh);
985 memset(ev, 0, sizeof(*ev)); 970 memset(ev, 0, sizeof(*ev));
986 ev->transport_handle = iscsi_handle(conn->transport); 971 ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
991 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 976 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
992 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 977 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
993 978
994 return iscsi_unicast_skb(skb, priv->daemon_pid); 979 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
995} 980}
996EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 981EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
997 982
983int iscsi_offload_mesg(struct Scsi_Host *shost,
984 struct iscsi_transport *transport, uint32_t type,
985 char *data, uint16_t data_size)
986{
987 struct nlmsghdr *nlh;
988 struct sk_buff *skb;
989 struct iscsi_uevent *ev;
990 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
991
992 skb = alloc_skb(len, GFP_NOIO);
993 if (!skb) {
994 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
995 return -ENOMEM;
996 }
997
998 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
999 ev = NLMSG_DATA(nlh);
1000 memset(ev, 0, sizeof(*ev));
1001 ev->type = type;
1002 ev->transport_handle = iscsi_handle(transport);
1003 switch (type) {
1004 case ISCSI_KEVENT_PATH_REQ:
1005 ev->r.req_path.host_no = shost->host_no;
1006 break;
1007 case ISCSI_KEVENT_IF_DOWN:
1008 ev->r.notify_if_down.host_no = shost->host_no;
1009 break;
1010 }
1011
1012 memcpy((char *)ev + sizeof(*ev), data, data_size);
1013
1014 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
1015}
1016EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1017
998void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) 1018void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
999{ 1019{
1000 struct nlmsghdr *nlh; 1020 struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1014 return; 1034 return;
1015 } 1035 }
1016 1036
1017 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1037 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1018 ev = NLMSG_DATA(nlh); 1038 ev = NLMSG_DATA(nlh);
1019 ev->transport_handle = iscsi_handle(conn->transport); 1039 ev->transport_handle = iscsi_handle(conn->transport);
1020 ev->type = ISCSI_KEVENT_CONN_ERROR; 1040 ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1022 ev->r.connerror.cid = conn->cid; 1042 ev->r.connerror.cid = conn->cid;
1023 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 1043 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
1024 1044
1025 iscsi_broadcast_skb(skb, GFP_ATOMIC); 1045 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1026 1046
1027 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1047 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1028 error); 1048 error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1030EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1050EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1031 1051
1032static int 1052static int
1033iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1053iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1034 void *payload, int size) 1054 void *payload, int size)
1035{ 1055{
1036 struct sk_buff *skb; 1056 struct sk_buff *skb;
1037 struct nlmsghdr *nlh; 1057 struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
1045 return -ENOMEM; 1065 return -ENOMEM;
1046 } 1066 }
1047 1067
1048 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 1068 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1049 nlh->nlmsg_flags = flags; 1069 nlh->nlmsg_flags = flags;
1050 memcpy(NLMSG_DATA(nlh), payload, size); 1070 memcpy(NLMSG_DATA(nlh), payload, size);
1051 return iscsi_unicast_skb(skb, pid); 1071 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1052} 1072}
1053 1073
1054static int 1074static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1085 return -ENOMEM; 1105 return -ENOMEM;
1086 } 1106 }
1087 1107
1088 nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, 1108 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1089 (len - sizeof(*nlhstat)), 0); 1109 (len - sizeof(*nlhstat)), 0);
1090 evstat = NLMSG_DATA(nlhstat); 1110 evstat = NLMSG_DATA(nlhstat);
1091 memset(evstat, 0, sizeof(*evstat)); 1111 memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1109 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1129 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1110 nlhstat->nlmsg_len = actual_size; 1130 nlhstat->nlmsg_len = actual_size;
1111 1131
1112 err = iscsi_unicast_skb(skbstat, priv->daemon_pid); 1132 err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
1133 GFP_ATOMIC);
1113 } while (err < 0 && err != -ECONNREFUSED); 1134 } while (err < 0 && err != -ECONNREFUSED);
1114 1135
1115 return err; 1136 return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1143 return -ENOMEM; 1164 return -ENOMEM;
1144 } 1165 }
1145 1166
1146 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1167 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1147 ev = NLMSG_DATA(nlh); 1168 ev = NLMSG_DATA(nlh);
1148 ev->transport_handle = iscsi_handle(session->transport); 1169 ev->transport_handle = iscsi_handle(session->transport);
1149 1170
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1172 * this will occur if the daemon is not up, so we just warn 1193 * this will occur if the daemon is not up, so we just warn
1173 * the user and when the daemon is restarted it will handle it 1194 * the user and when the daemon is restarted it will handle it
1174 */ 1195 */
1175 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1196 rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
1176 if (rc == -ESRCH) 1197 if (rc == -ESRCH)
1177 iscsi_cls_session_printk(KERN_ERR, session, 1198 iscsi_cls_session_printk(KERN_ERR, session,
1178 "Cannot notify userspace of session " 1199 "Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1268 return err; 1289 return err;
1269} 1290}
1270 1291
1292static int iscsi_if_ep_connect(struct iscsi_transport *transport,
1293 struct iscsi_uevent *ev, int msg_type)
1294{
1295 struct iscsi_endpoint *ep;
1296 struct sockaddr *dst_addr;
1297 struct Scsi_Host *shost = NULL;
1298 int non_blocking, err = 0;
1299
1300 if (!transport->ep_connect)
1301 return -EINVAL;
1302
1303 if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
1304 shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
1305 if (!shost) {
1306 printk(KERN_ERR "ep connect failed. Could not find "
1307 "host no %u\n",
1308 ev->u.ep_connect_through_host.host_no);
1309 return -ENODEV;
1310 }
1311 non_blocking = ev->u.ep_connect_through_host.non_blocking;
1312 } else
1313 non_blocking = ev->u.ep_connect.non_blocking;
1314
1315 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1316 ep = transport->ep_connect(shost, dst_addr, non_blocking);
1317 if (IS_ERR(ep)) {
1318 err = PTR_ERR(ep);
1319 goto release_host;
1320 }
1321
1322 ev->r.ep_connect_ret.handle = ep->id;
1323release_host:
1324 if (shost)
1325 scsi_host_put(shost);
1326 return err;
1327}
1328
1271static int 1329static int
1272iscsi_if_transport_ep(struct iscsi_transport *transport, 1330iscsi_if_transport_ep(struct iscsi_transport *transport,
1273 struct iscsi_uevent *ev, int msg_type) 1331 struct iscsi_uevent *ev, int msg_type)
1274{ 1332{
1275 struct iscsi_endpoint *ep; 1333 struct iscsi_endpoint *ep;
1276 struct sockaddr *dst_addr;
1277 int rc = 0; 1334 int rc = 0;
1278 1335
1279 switch (msg_type) { 1336 switch (msg_type) {
1337 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1280 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1338 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1281 if (!transport->ep_connect) 1339 rc = iscsi_if_ep_connect(transport, ev, msg_type);
1282 return -EINVAL;
1283
1284 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1285 ep = transport->ep_connect(dst_addr,
1286 ev->u.ep_connect.non_blocking);
1287 if (IS_ERR(ep))
1288 return PTR_ERR(ep);
1289
1290 ev->r.ep_connect_ret.handle = ep->id;
1291 break; 1340 break;
1292 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1341 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1293 if (!transport->ep_poll) 1342 if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1365} 1414}
1366 1415
1367static int 1416static int
1368iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1417iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1418{
1419 struct Scsi_Host *shost;
1420 struct iscsi_path *params;
1421 int err;
1422
1423 if (!transport->set_path)
1424 return -ENOSYS;
1425
1426 shost = scsi_host_lookup(ev->u.set_path.host_no);
1427 if (!shost) {
1428 printk(KERN_ERR "set path could not find host no %u\n",
1429 ev->u.set_path.host_no);
1430 return -ENODEV;
1431 }
1432
1433 params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
1434 err = transport->set_path(shost, params);
1435
1436 scsi_host_put(shost);
1437 return err;
1438}
1439
1440static int
1441iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1369{ 1442{
1370 int err = 0; 1443 int err = 0;
1371 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1444 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1375 struct iscsi_cls_conn *conn; 1448 struct iscsi_cls_conn *conn;
1376 struct iscsi_endpoint *ep = NULL; 1449 struct iscsi_endpoint *ep = NULL;
1377 1450
1451 if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1452 *group = ISCSI_NL_GRP_UIP;
1453 else
1454 *group = ISCSI_NL_GRP_ISCSID;
1455
1378 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1456 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1379 if (!priv) 1457 if (!priv)
1380 return -EINVAL; 1458 return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1383 if (!try_module_get(transport->owner)) 1461 if (!try_module_get(transport->owner))
1384 return -EINVAL; 1462 return -EINVAL;
1385 1463
1386 priv->daemon_pid = NETLINK_CREDS(skb)->pid;
1387
1388 switch (nlh->nlmsg_type) { 1464 switch (nlh->nlmsg_type) {
1389 case ISCSI_UEVENT_CREATE_SESSION: 1465 case ISCSI_UEVENT_CREATE_SESSION:
1390 err = iscsi_if_create_session(priv, ep, ev, 1466 err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1469 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1545 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1470 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1546 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1471 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1547 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1548 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1472 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1549 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1473 break; 1550 break;
1474 case ISCSI_UEVENT_TGT_DSCVR: 1551 case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1477 case ISCSI_UEVENT_SET_HOST_PARAM: 1554 case ISCSI_UEVENT_SET_HOST_PARAM:
1478 err = iscsi_set_host_param(transport, ev); 1555 err = iscsi_set_host_param(transport, ev);
1479 break; 1556 break;
1557 case ISCSI_UEVENT_PATH_UPDATE:
1558 err = iscsi_set_path(transport, ev);
1559 break;
1480 default: 1560 default:
1481 err = -ENOSYS; 1561 err = -ENOSYS;
1482 break; 1562 break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
1499 uint32_t rlen; 1579 uint32_t rlen;
1500 struct nlmsghdr *nlh; 1580 struct nlmsghdr *nlh;
1501 struct iscsi_uevent *ev; 1581 struct iscsi_uevent *ev;
1582 uint32_t group;
1502 1583
1503 nlh = nlmsg_hdr(skb); 1584 nlh = nlmsg_hdr(skb);
1504 if (nlh->nlmsg_len < sizeof(*nlh) || 1585 if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
1511 if (rlen > skb->len) 1592 if (rlen > skb->len)
1512 rlen = skb->len; 1593 rlen = skb->len;
1513 1594
1514 err = iscsi_if_recv_msg(skb, nlh); 1595 err = iscsi_if_recv_msg(skb, nlh, &group);
1515 if (err) { 1596 if (err) {
1516 ev->type = ISCSI_KEVENT_IF_ERROR; 1597 ev->type = ISCSI_KEVENT_IF_ERROR;
1517 ev->iferror = err; 1598 ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
1525 */ 1606 */
1526 if (ev->type == ISCSI_UEVENT_GET_STATS && !err) 1607 if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
1527 break; 1608 break;
1528 err = iscsi_if_send_reply( 1609 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
1529 NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
1530 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 1610 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
1531 } while (err < 0 && err != -ECONNREFUSED); 1611 } while (err < 0 && err != -ECONNREFUSED);
1532 skb_pull(skb, rlen); 1612 skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1774 if (!priv) 1854 if (!priv)
1775 return NULL; 1855 return NULL;
1776 INIT_LIST_HEAD(&priv->list); 1856 INIT_LIST_HEAD(&priv->list);
1777 priv->daemon_pid = -1;
1778 priv->iscsi_transport = tt; 1857 priv->iscsi_transport = tt;
1779 priv->t.user_scan = iscsi_user_scan; 1858 priv->t.user_scan = iscsi_user_scan;
1780 priv->t.create_work_queue = 1; 1859 priv->t.create_work_queue = 1;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bcf3bd40bbd5..878b17a9af30 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1902 index = sdkp->index; 1902 index = sdkp->index;
1903 dev = &sdp->sdev_gendev; 1903 dev = &sdp->sdev_gendev;
1904 1904
1905 if (!sdp->request_queue->rq_timeout) {
1906 if (sdp->type != TYPE_MOD)
1907 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1908 else
1909 blk_queue_rq_timeout(sdp->request_queue,
1910 SD_MOD_TIMEOUT);
1911 }
1912
1913 device_initialize(&sdkp->dev);
1914 sdkp->dev.parent = &sdp->sdev_gendev;
1915 sdkp->dev.class = &sd_disk_class;
1916 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1917
1918 if (device_add(&sdkp->dev))
1919 goto out_free_index;
1920
1921 get_device(&sdp->sdev_gendev);
1922
1923 if (index < SD_MAX_DISKS) { 1905 if (index < SD_MAX_DISKS) {
1924 gd->major = sd_major((index & 0xf0) >> 4); 1906 gd->major = sd_major((index & 0xf0) >> 4);
1925 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1907 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1954 1936
1955 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1937 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1956 sdp->removable ? "removable " : ""); 1938 sdp->removable ? "removable " : "");
1957
1958 return;
1959
1960 out_free_index:
1961 ida_remove(&sd_index_ida, index);
1962} 1939}
1963 1940
1964/** 1941/**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
2026 sdkp->openers = 0; 2003 sdkp->openers = 0;
2027 sdkp->previous_state = 1; 2004 sdkp->previous_state = 1;
2028 2005
2006 if (!sdp->request_queue->rq_timeout) {
2007 if (sdp->type != TYPE_MOD)
2008 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2009 else
2010 blk_queue_rq_timeout(sdp->request_queue,
2011 SD_MOD_TIMEOUT);
2012 }
2013
2014 device_initialize(&sdkp->dev);
2015 sdkp->dev.parent = &sdp->sdev_gendev;
2016 sdkp->dev.class = &sd_disk_class;
2017 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
2018
2019 if (device_add(&sdkp->dev))
2020 goto out_free_index;
2021
2022 get_device(&sdp->sdev_gendev);
2023
2029 async_schedule(sd_probe_async, sdkp); 2024 async_schedule(sd_probe_async, sdkp);
2030 2025
2031 return 0; 2026 return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
2055 **/ 2050 **/
2056static int sd_remove(struct device *dev) 2051static int sd_remove(struct device *dev)
2057{ 2052{
2058 struct scsi_disk *sdkp = dev_get_drvdata(dev); 2053 struct scsi_disk *sdkp;
2059 2054
2055 async_synchronize_full();
2056 sdkp = dev_get_drvdata(dev);
2060 device_del(&sdkp->dev); 2057 device_del(&sdkp->dev);
2061 del_gendisk(sdkp->disk); 2058 del_gendisk(sdkp->disk);
2062 sd_shutdown(dev); 2059 sd_shutdown(dev);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 89bd438e1fe3..b33d04250bbc 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2964 !(STp->use_pf & PF_TESTED)) { 2964 !(STp->use_pf & PF_TESTED)) {
2965 /* Try the other possible state of Page Format if not 2965 /* Try the other possible state of Page Format if not
2966 already tried */ 2966 already tried */
2967 STp->use_pf = !STp->use_pf | PF_TESTED; 2967 STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
2968 st_release_request(SRpnt); 2968 st_release_request(SRpnt);
2969 SRpnt = NULL; 2969 SRpnt = NULL;
2970 return st_int_ioctl(STp, cmd_in, arg); 2970 return st_int_ioctl(STp, cmd_in, arg);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec8266..45374d66d26a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
737 struct sym_hcb *np = sym_get_hcb(sdev->host); 737 struct sym_hcb *np = sym_get_hcb(sdev->host);
738 struct sym_tcb *tp = &np->target[sdev->id]; 738 struct sym_tcb *tp = &np->target[sdev->id];
739 struct sym_lcb *lp; 739 struct sym_lcb *lp;
740 unsigned long flags;
741 int error;
740 742
741 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 743 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
742 return -ENXIO; 744 return -ENXIO;
743 745
744 tp->starget = sdev->sdev_target; 746 spin_lock_irqsave(np->s.host->host_lock, flags);
747
745 /* 748 /*
746 * Fail the device init if the device is flagged NOSCAN at BOOT in 749 * Fail the device init if the device is flagged NOSCAN at BOOT in
747 * the NVRAM. This may speed up boot and maintain coherency with 750 * the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
753 756
754 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 757 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
755 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 758 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
756 starget_printk(KERN_INFO, tp->starget, 759 starget_printk(KERN_INFO, sdev->sdev_target,
757 "Scan at boot disabled in NVRAM\n"); 760 "Scan at boot disabled in NVRAM\n");
758 return -ENXIO; 761 error = -ENXIO;
762 goto out;
759 } 763 }
760 764
761 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 765 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
762 if (sdev->lun != 0) 766 if (sdev->lun != 0) {
763 return -ENXIO; 767 error = -ENXIO;
764 starget_printk(KERN_INFO, tp->starget, 768 goto out;
769 }
770 starget_printk(KERN_INFO, sdev->sdev_target,
765 "Multiple LUNs disabled in NVRAM\n"); 771 "Multiple LUNs disabled in NVRAM\n");
766 } 772 }
767 773
768 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 774 lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
769 if (!lp) 775 if (!lp) {
770 return -ENOMEM; 776 error = -ENOMEM;
777 goto out;
778 }
779 if (tp->nlcb == 1)
780 tp->starget = sdev->sdev_target;
771 781
772 spi_min_period(tp->starget) = tp->usr_period; 782 spi_min_period(tp->starget) = tp->usr_period;
773 spi_max_width(tp->starget) = tp->usr_width; 783 spi_max_width(tp->starget) = tp->usr_width;
774 784
775 return 0; 785 error = 0;
786out:
787 spin_unlock_irqrestore(np->s.host->host_lock, flags);
788
789 return error;
776} 790}
777 791
778/* 792/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
819static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 833static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
820{ 834{
821 struct sym_hcb *np = sym_get_hcb(sdev->host); 835 struct sym_hcb *np = sym_get_hcb(sdev->host);
822 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 836 struct sym_tcb *tp = &np->target[sdev->id];
837 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
838 unsigned long flags;
839
840 spin_lock_irqsave(np->s.host->host_lock, flags);
841
842 if (lp->busy_itlq || lp->busy_itl) {
843 /*
844 * This really shouldn't happen, but we can't return an error
845 * so let's try to stop all on-going I/O.
846 */
847 starget_printk(KERN_WARNING, tp->starget,
848 "Removing busy LCB (%d)\n", sdev->lun);
849 sym_reset_scsi_bus(np, 1);
850 }
823 851
824 if (lp->itlq_tbl) 852 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
825 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 853 /*
826 kfree(lp->cb_tags); 854 * It was the last unit for this target.
827 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 855 */
856 tp->head.sval = 0;
857 tp->head.wval = np->rv_scntl3;
858 tp->head.uval = 0;
859 tp->tgoal.check_nego = 1;
860 tp->starget = NULL;
861 }
862
863 spin_unlock_irqrestore(np->s.host->host_lock, flags);
828} 864}
829 865
830/* 866/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
890 if (!((uc->target >> t) & 1)) 926 if (!((uc->target >> t) & 1))
891 continue; 927 continue;
892 tp = &np->target[t]; 928 tp = &np->target[t];
929 if (!tp->nlcb)
930 continue;
893 931
894 switch (uc->cmd) { 932 switch (uc->cmd) {
895 933
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed182..69ad4945c936 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
1896 tp->head.sval = 0; 1896 tp->head.sval = 0;
1897 tp->head.wval = np->rv_scntl3; 1897 tp->head.wval = np->rv_scntl3;
1898 tp->head.uval = 0; 1898 tp->head.uval = 0;
1899 if (tp->lun0p)
1900 tp->lun0p->to_clear = 0;
1901 if (tp->lunmp) {
1902 int ln;
1903
1904 for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
1905 if (tp->lunmp[ln])
1906 tp->lunmp[ln]->to_clear = 0;
1907 }
1899 } 1908 }
1900 1909
1901 /* 1910 /*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4988 */ 4997 */
4989 if (ln && !tp->lunmp) { 4998 if (ln && !tp->lunmp) {
4990 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), 4999 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
4991 GFP_KERNEL); 5000 GFP_ATOMIC);
4992 if (!tp->lunmp) 5001 if (!tp->lunmp)
4993 goto fail; 5002 goto fail;
4994 } 5003 }
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
5008 tp->lun0p = lp; 5017 tp->lun0p = lp;
5009 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 5018 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5010 } 5019 }
5020 tp->nlcb++;
5011 5021
5012 /* 5022 /*
5013 * Let the itl task point to error handling. 5023 * Let the itl task point to error handling.
@@ -5085,6 +5095,43 @@ fail:
5085} 5095}
5086 5096
5087/* 5097/*
5098 * Lun control block deallocation. Returns the number of valid remaing LCBs
5099 * for the target.
5100 */
5101int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
5102{
5103 struct sym_tcb *tp = &np->target[tn];
5104 struct sym_lcb *lp = sym_lp(tp, ln);
5105
5106 tp->nlcb--;
5107
5108 if (ln) {
5109 if (!tp->nlcb) {
5110 kfree(tp->lunmp);
5111 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5112 tp->lunmp = NULL;
5113 tp->luntbl = NULL;
5114 tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
5115 } else {
5116 tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
5117 tp->lunmp[ln] = NULL;
5118 }
5119 } else {
5120 tp->lun0p = NULL;
5121 tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
5122 }
5123
5124 if (lp->itlq_tbl) {
5125 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5126 kfree(lp->cb_tags);
5127 }
5128
5129 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5130
5131 return tp->nlcb;
5132}
5133
5134/*
5088 * Queue a SCSI IO to the controller. 5135 * Queue a SCSI IO to the controller.
5089 */ 5136 */
5090int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 5137int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6bf..053e63c86822 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
401 * An array of bus addresses is used on reselection. 401 * An array of bus addresses is used on reselection.
402 */ 402 */
403 u32 *luntbl; /* LCBs bus address table */ 403 u32 *luntbl; /* LCBs bus address table */
404 int nlcb; /* Number of valid LCBs (including LUN #0) */
404 405
405 /* 406 /*
406 * LUN table used by the C code. 407 * LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
1065struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); 1066struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1066void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); 1067void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1067struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); 1068struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1069int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1068int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); 1070int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1069int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); 1071int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1070int sym_reset_scsi_target(struct sym_hcb *np, int target); 1072int sym_reset_scsi_target(struct sym_hcb *np, int target);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 83a185d52961..957494775413 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -212,7 +212,7 @@ config SPI_TXX9
212 212
213config SPI_XILINX 213config SPI_XILINX
214 tristate "Xilinx SPI controller" 214 tristate "Xilinx SPI controller"
215 depends on XILINX_VIRTEX && EXPERIMENTAL 215 depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
216 select SPI_BITBANG 216 select SPI_BITBANG
217 help 217 help
218 This exposes the SPI controller IP from the Xilinx EDK. 218 This exposes the SPI controller IP from the Xilinx EDK.
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index c6c816b7ecb5..5eee3f82be5d 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -22,6 +22,7 @@ config USB_ARCH_HAS_HCD
22 default y if PCMCIA && !M32R # sl811_cs 22 default y if PCMCIA && !M32R # sl811_cs
23 default y if ARM # SL-811 23 default y if ARM # SL-811
24 default y if SUPERH # r8a66597-hcd 24 default y if SUPERH # r8a66597-hcd
25 default y if MICROBLAZE
25 default PCI 26 default PCI
26 27
27# many non-PCI SOC chips embed OHCI 28# many non-PCI SOC chips embed OHCI
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0048f1185a60..74712cb8399a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1996,7 +1996,7 @@ config FB_PS3_DEFAULT_SIZE_M
1996 1996
1997config FB_XILINX 1997config FB_XILINX
1998 tristate "Xilinx frame buffer support" 1998 tristate "Xilinx frame buffer support"
1999 depends on FB && XILINX_VIRTEX 1999 depends on FB && (XILINX_VIRTEX || MICROBLAZE)
2000 select FB_CFB_FILLRECT 2000 select FB_CFB_FILLRECT
2001 select FB_CFB_COPYAREA 2001 select FB_CFB_COPYAREA
2002 select FB_CFB_IMAGEBLIT 2002 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 35e8eb02b9e9..e4e4d433b007 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -354,7 +354,7 @@ static int default_crt_on __devinitdata = 0;
354static int default_lcd_on __devinitdata = 1; 354static int default_lcd_on __devinitdata = 1;
355 355
356#ifdef CONFIG_MTRR 356#ifdef CONFIG_MTRR
357static int mtrr = 1; 357static bool mtrr = true;
358#endif 358#endif
359 359
360#ifdef CONFIG_PMAC_BACKLIGHT 360#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 83c5cefc266c..da7c01b39be2 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1736,10 +1736,8 @@ static int __init cyber2000fb_init(void)
1736 1736
1737#ifdef CONFIG_ARCH_SHARK 1737#ifdef CONFIG_ARCH_SHARK
1738 err = cyberpro_vl_probe(); 1738 err = cyberpro_vl_probe();
1739 if (!err) { 1739 if (!err)
1740 ret = 0; 1740 ret = 0;
1741 __module_get(THIS_MODULE);
1742 }
1743#endif 1741#endif
1744#ifdef CONFIG_PCI 1742#ifdef CONFIG_PCI
1745 err = pci_register_driver(&cyberpro_driver); 1743 err = pci_register_driver(&cyberpro_driver);
@@ -1749,14 +1747,15 @@ static int __init cyber2000fb_init(void)
1749 1747
1750 return ret ? err : 0; 1748 return ret ? err : 0;
1751} 1749}
1750module_init(cyber2000fb_init);
1752 1751
1752#ifndef CONFIG_ARCH_SHARK
1753static void __exit cyberpro_exit(void) 1753static void __exit cyberpro_exit(void)
1754{ 1754{
1755 pci_unregister_driver(&cyberpro_driver); 1755 pci_unregister_driver(&cyberpro_driver);
1756} 1756}
1757
1758module_init(cyber2000fb_init);
1759module_exit(cyberpro_exit); 1757module_exit(cyberpro_exit);
1758#endif
1760 1759
1761MODULE_AUTHOR("Russell King"); 1760MODULE_AUTHOR("Russell King");
1762MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver"); 1761MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 421770b5e6ab..ca5b4643a401 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -45,7 +45,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
45static int mtrr __devinitdata = 3; /* enable mtrr by default */ 45static int mtrr __devinitdata = 3; /* enable mtrr by default */
46static int blank = 1; /* enable blanking by default */ 46static int blank = 1; /* enable blanking by default */
47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */ 47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */ 48static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
49static int nocrtc __devinitdata; /* ignore CRTC settings */ 49static int nocrtc __devinitdata; /* ignore CRTC settings */
50static int noedid __devinitdata; /* don't try DDC transfers */ 50static int noedid __devinitdata; /* don't try DDC transfers */
51static int vram_remap __devinitdata; /* set amt. of memory to be used */ 51static int vram_remap __devinitdata; /* set amt. of memory to be used */
@@ -2002,11 +2002,7 @@ static void __devexit uvesafb_exit(void)
2002 2002
2003module_exit(uvesafb_exit); 2003module_exit(uvesafb_exit);
2004 2004
2005static int param_get_scroll(char *buffer, struct kernel_param *kp) 2005#define param_get_scroll NULL
2006{
2007 return 0;
2008}
2009
2010static int param_set_scroll(const char *val, struct kernel_param *kp) 2006static int param_set_scroll(const char *val, struct kernel_param *kp)
2011{ 2007{
2012 ypan = 0; 2008 ypan = 0;
@@ -2017,6 +2013,8 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
2017 ypan = 1; 2013 ypan = 1;
2018 else if (!strcmp(val, "ywrap")) 2014 else if (!strcmp(val, "ywrap"))
2019 ypan = 2; 2015 ypan = 2;
2016 else
2017 return -EINVAL;
2020 2018
2021 return 0; 2019 return 0;
2022} 2020}
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 018c070a357f..3a43ebf83a49 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d,
31 return sprintf(buf, "virtio:d%08Xv%08X\n", 31 return sprintf(buf, "virtio:d%08Xv%08X\n",
32 dev->id.device, dev->id.vendor); 32 dev->id.device, dev->id.vendor);
33} 33}
34static ssize_t features_show(struct device *_d,
35 struct device_attribute *attr, char *buf)
36{
37 struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
38 unsigned int i;
39 ssize_t len = 0;
40
41 /* We actually represent this as a bitstring, as it could be
42 * arbitrary length in future. */
43 for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
44 len += sprintf(buf+len, "%c",
45 test_bit(i, dev->features) ? '1' : '0');
46 len += sprintf(buf+len, "\n");
47 return len;
48}
34static struct device_attribute virtio_dev_attrs[] = { 49static struct device_attribute virtio_dev_attrs[] = {
35 __ATTR_RO(device), 50 __ATTR_RO(device),
36 __ATTR_RO(vendor), 51 __ATTR_RO(vendor),
37 __ATTR_RO(status), 52 __ATTR_RO(status),
38 __ATTR_RO(modalias), 53 __ATTR_RO(modalias),
54 __ATTR_RO(features),
39 __ATTR_NULL 55 __ATTR_NULL
40}; 56};
41 57
42static inline int virtio_id_match(const struct virtio_device *dev, 58static inline int virtio_id_match(const struct virtio_device *dev,
43 const struct virtio_device_id *id) 59 const struct virtio_device_id *id)
44{ 60{
45 if (id->device != dev->id.device) 61 if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
46 return 0; 62 return 0;
47 63
48 return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor; 64 return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
49} 65}
50 66
51/* This looks through all the IDs a driver claims to support. If any of them 67/* This looks through all the IDs a driver claims to support. If any of them
@@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d)
118 if (device_features & (1 << i)) 134 if (device_features & (1 << i))
119 set_bit(i, dev->features); 135 set_bit(i, dev->features);
120 136
137 dev->config->finalize_features(dev);
138
121 err = drv->probe(dev); 139 err = drv->probe(dev);
122 if (err) 140 if (err)
123 add_status(dev, VIRTIO_CONFIG_S_FAILED); 141 add_status(dev, VIRTIO_CONFIG_S_FAILED);
124 else { 142 else
125 dev->config->finalize_features(dev);
126 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 143 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
127 } 144
128 return err; 145 return err;
129} 146}
130 147
@@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev)
185 /* Acknowledge that we've seen the device. */ 202 /* Acknowledge that we've seen the device. */
186 add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); 203 add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
187 204
205 INIT_LIST_HEAD(&dev->vqs);
206
188 /* device_register() causes the bus infrastructure to look for a 207 /* device_register() causes the bus infrastructure to look for a
189 * matching driver. */ 208 * matching driver. */
190 err = device_register(&dev->dev); 209 err = device_register(&dev->dev);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9c76a061a04d..26b278264796 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -204,6 +204,9 @@ static int balloon(void *_vballoon)
204static int virtballoon_probe(struct virtio_device *vdev) 204static int virtballoon_probe(struct virtio_device *vdev)
205{ 205{
206 struct virtio_balloon *vb; 206 struct virtio_balloon *vb;
207 struct virtqueue *vqs[2];
208 vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
209 const char *names[] = { "inflate", "deflate" };
207 int err; 210 int err;
208 211
209 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); 212 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
218 vb->vdev = vdev; 221 vb->vdev = vdev;
219 222
220 /* We expect two virtqueues. */ 223 /* We expect two virtqueues. */
221 vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack); 224 err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
222 if (IS_ERR(vb->inflate_vq)) { 225 if (err)
223 err = PTR_ERR(vb->inflate_vq);
224 goto out_free_vb; 226 goto out_free_vb;
225 }
226 227
227 vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack); 228 vb->inflate_vq = vqs[0];
228 if (IS_ERR(vb->deflate_vq)) { 229 vb->deflate_vq = vqs[1];
229 err = PTR_ERR(vb->deflate_vq);
230 goto out_del_inflate_vq;
231 }
232 230
233 vb->thread = kthread_run(balloon, vb, "vballoon"); 231 vb->thread = kthread_run(balloon, vb, "vballoon");
234 if (IS_ERR(vb->thread)) { 232 if (IS_ERR(vb->thread)) {
235 err = PTR_ERR(vb->thread); 233 err = PTR_ERR(vb->thread);
236 goto out_del_deflate_vq; 234 goto out_del_vqs;
237 } 235 }
238 236
239 vb->tell_host_first 237 vb->tell_host_first
@@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
241 239
242 return 0; 240 return 0;
243 241
244out_del_deflate_vq: 242out_del_vqs:
245 vdev->config->del_vq(vb->deflate_vq); 243 vdev->config->del_vqs(vdev);
246out_del_inflate_vq:
247 vdev->config->del_vq(vb->inflate_vq);
248out_free_vb: 244out_free_vb:
249 kfree(vb); 245 kfree(vb);
250out: 246out:
@@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
264 /* Now we reset the device so we can clean up the queues. */ 260 /* Now we reset the device so we can clean up the queues. */
265 vdev->config->reset(vdev); 261 vdev->config->reset(vdev);
266 262
267 vdev->config->del_vq(vb->deflate_vq); 263 vdev->config->del_vqs(vdev);
268 vdev->config->del_vq(vb->inflate_vq);
269 kfree(vb); 264 kfree(vb);
270} 265}
271 266
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 330aacbdec1f..193c8f0e5cc5 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -42,6 +42,26 @@ struct virtio_pci_device
42 /* a list of queues so we can dispatch IRQs */ 42 /* a list of queues so we can dispatch IRQs */
43 spinlock_t lock; 43 spinlock_t lock;
44 struct list_head virtqueues; 44 struct list_head virtqueues;
45
46 /* MSI-X support */
47 int msix_enabled;
48 int intx_enabled;
49 struct msix_entry *msix_entries;
50 /* Name strings for interrupts. This size should be enough,
51 * and I'm too lazy to allocate each name separately. */
52 char (*msix_names)[256];
53 /* Number of available vectors */
54 unsigned msix_vectors;
55 /* Vectors allocated */
56 unsigned msix_used_vectors;
57};
58
59/* Constants for MSI-X */
60/* Use first vector for configuration changes, second and the rest for
61 * virtqueues Thus, we need at least 2 vectors for MSI. */
62enum {
63 VP_MSIX_CONFIG_VECTOR = 0,
64 VP_MSIX_VQ_VECTOR = 1,
45}; 65};
46 66
47struct virtio_pci_vq_info 67struct virtio_pci_vq_info
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
60 80
61 /* the list node for the virtqueues list */ 81 /* the list node for the virtqueues list */
62 struct list_head node; 82 struct list_head node;
83
84 /* MSI-X vector (or none) */
85 unsigned vector;
63}; 86};
64 87
65/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ 88/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
109 void *buf, unsigned len) 132 void *buf, unsigned len)
110{ 133{
111 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 134 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
112 void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; 135 void __iomem *ioaddr = vp_dev->ioaddr +
136 VIRTIO_PCI_CONFIG(vp_dev) + offset;
113 u8 *ptr = buf; 137 u8 *ptr = buf;
114 int i; 138 int i;
115 139
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
123 const void *buf, unsigned len) 147 const void *buf, unsigned len)
124{ 148{
125 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 149 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
126 void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; 150 void __iomem *ioaddr = vp_dev->ioaddr +
151 VIRTIO_PCI_CONFIG(vp_dev) + offset;
127 const u8 *ptr = buf; 152 const u8 *ptr = buf;
128 int i; 153 int i;
129 154
@@ -164,6 +189,37 @@ static void vp_notify(struct virtqueue *vq)
164 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); 189 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
165} 190}
166 191
192/* Handle a configuration change: Tell driver if it wants to know. */
193static irqreturn_t vp_config_changed(int irq, void *opaque)
194{
195 struct virtio_pci_device *vp_dev = opaque;
196 struct virtio_driver *drv;
197 drv = container_of(vp_dev->vdev.dev.driver,
198 struct virtio_driver, driver);
199
200 if (drv && drv->config_changed)
201 drv->config_changed(&vp_dev->vdev);
202 return IRQ_HANDLED;
203}
204
205/* Notify all virtqueues on an interrupt. */
206static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
207{
208 struct virtio_pci_device *vp_dev = opaque;
209 struct virtio_pci_vq_info *info;
210 irqreturn_t ret = IRQ_NONE;
211 unsigned long flags;
212
213 spin_lock_irqsave(&vp_dev->lock, flags);
214 list_for_each_entry(info, &vp_dev->virtqueues, node) {
215 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
216 ret = IRQ_HANDLED;
217 }
218 spin_unlock_irqrestore(&vp_dev->lock, flags);
219
220 return ret;
221}
222
167/* A small wrapper to also acknowledge the interrupt when it's handled. 223/* A small wrapper to also acknowledge the interrupt when it's handled.
168 * I really need an EIO hook for the vring so I can ack the interrupt once we 224 * I really need an EIO hook for the vring so I can ack the interrupt once we
169 * know that we'll be handling the IRQ but before we invoke the callback since 225 * know that we'll be handling the IRQ but before we invoke the callback since
@@ -173,9 +229,6 @@ static void vp_notify(struct virtqueue *vq)
173static irqreturn_t vp_interrupt(int irq, void *opaque) 229static irqreturn_t vp_interrupt(int irq, void *opaque)
174{ 230{
175 struct virtio_pci_device *vp_dev = opaque; 231 struct virtio_pci_device *vp_dev = opaque;
176 struct virtio_pci_vq_info *info;
177 irqreturn_t ret = IRQ_NONE;
178 unsigned long flags;
179 u8 isr; 232 u8 isr;
180 233
181 /* reading the ISR has the effect of also clearing it so it's very 234 /* reading the ISR has the effect of also clearing it so it's very
@@ -187,34 +240,137 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
187 return IRQ_NONE; 240 return IRQ_NONE;
188 241
189 /* Configuration change? Tell driver if it wants to know. */ 242 /* Configuration change? Tell driver if it wants to know. */
190 if (isr & VIRTIO_PCI_ISR_CONFIG) { 243 if (isr & VIRTIO_PCI_ISR_CONFIG)
191 struct virtio_driver *drv; 244 vp_config_changed(irq, opaque);
192 drv = container_of(vp_dev->vdev.dev.driver,
193 struct virtio_driver, driver);
194 245
195 if (drv && drv->config_changed) 246 return vp_vring_interrupt(irq, opaque);
196 drv->config_changed(&vp_dev->vdev); 247}
248
249static void vp_free_vectors(struct virtio_device *vdev)
250{
251 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
252 int i;
253
254 if (vp_dev->intx_enabled) {
255 free_irq(vp_dev->pci_dev->irq, vp_dev);
256 vp_dev->intx_enabled = 0;
197 } 257 }
198 258
199 spin_lock_irqsave(&vp_dev->lock, flags); 259 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
200 list_for_each_entry(info, &vp_dev->virtqueues, node) { 260 free_irq(vp_dev->msix_entries[i].vector, vp_dev);
201 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) 261 vp_dev->msix_used_vectors = 0;
202 ret = IRQ_HANDLED; 262
263 if (vp_dev->msix_enabled) {
264 /* Disable the vector used for configuration */
265 iowrite16(VIRTIO_MSI_NO_VECTOR,
266 vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
267 /* Flush the write out to device */
268 ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
269
270 vp_dev->msix_enabled = 0;
271 pci_disable_msix(vp_dev->pci_dev);
203 } 272 }
204 spin_unlock_irqrestore(&vp_dev->lock, flags); 273}
205 274
206 return ret; 275static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
276 int *options, int noptions)
277{
278 int i;
279 for (i = 0; i < noptions; ++i)
280 if (!pci_enable_msix(dev, entries, options[i]))
281 return options[i];
282 return -EBUSY;
283}
284
285static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
286{
287 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
288 const char *name = dev_name(&vp_dev->vdev.dev);
289 unsigned i, v;
290 int err = -ENOMEM;
291 /* We want at most one vector per queue and one for config changes.
 292	 * Fall back to separate vectors for config and a shared one for queues.
293 * Finally fall back to regular interrupts. */
294 int options[] = { max_vqs + 1, 2 };
295 int nvectors = max(options[0], options[1]);
296
297 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
298 GFP_KERNEL);
299 if (!vp_dev->msix_entries)
300 goto error_entries;
301 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
302 GFP_KERNEL);
303 if (!vp_dev->msix_names)
304 goto error_names;
305
306 for (i = 0; i < nvectors; ++i)
307 vp_dev->msix_entries[i].entry = i;
308
309 err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
310 options, ARRAY_SIZE(options));
311 if (err < 0) {
312 /* Can't allocate enough MSI-X vectors, use regular interrupt */
313 vp_dev->msix_vectors = 0;
314 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
315 IRQF_SHARED, name, vp_dev);
316 if (err)
317 goto error_irq;
318 vp_dev->intx_enabled = 1;
319 } else {
320 vp_dev->msix_vectors = err;
321 vp_dev->msix_enabled = 1;
322
323 /* Set the vector used for configuration */
324 v = vp_dev->msix_used_vectors;
325 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
326 "%s-config", name);
327 err = request_irq(vp_dev->msix_entries[v].vector,
328 vp_config_changed, 0, vp_dev->msix_names[v],
329 vp_dev);
330 if (err)
331 goto error_irq;
332 ++vp_dev->msix_used_vectors;
333
334 iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
335 /* Verify we had enough resources to assign the vector */
336 v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
337 if (v == VIRTIO_MSI_NO_VECTOR) {
338 err = -EBUSY;
339 goto error_irq;
340 }
341 }
342
343 if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
344 /* Shared vector for all VQs */
345 v = vp_dev->msix_used_vectors;
346 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
347 "%s-virtqueues", name);
348 err = request_irq(vp_dev->msix_entries[v].vector,
349 vp_vring_interrupt, 0, vp_dev->msix_names[v],
350 vp_dev);
351 if (err)
352 goto error_irq;
353 ++vp_dev->msix_used_vectors;
354 }
355 return 0;
356error_irq:
357 vp_free_vectors(vdev);
358 kfree(vp_dev->msix_names);
359error_names:
360 kfree(vp_dev->msix_entries);
361error_entries:
362 return err;
207} 363}
208 364
209/* the config->find_vq() implementation */
210static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, 365static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
211 void (*callback)(struct virtqueue *vq)) 366 void (*callback)(struct virtqueue *vq),
367 const char *name)
212{ 368{
213 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 369 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
214 struct virtio_pci_vq_info *info; 370 struct virtio_pci_vq_info *info;
215 struct virtqueue *vq; 371 struct virtqueue *vq;
216 unsigned long flags, size; 372 unsigned long flags, size;
217 u16 num; 373 u16 num, vector;
218 int err; 374 int err;
219 375
220 /* Select the queue we're interested in */ 376 /* Select the queue we're interested in */
@@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
233 389
234 info->queue_index = index; 390 info->queue_index = index;
235 info->num = num; 391 info->num = num;
392 info->vector = VIRTIO_MSI_NO_VECTOR;
236 393
237 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); 394 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
238 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); 395 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
247 404
248 /* create the vring */ 405 /* create the vring */
249 vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, 406 vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
250 vdev, info->queue, vp_notify, callback); 407 vdev, info->queue, vp_notify, callback, name);
251 if (!vq) { 408 if (!vq) {
252 err = -ENOMEM; 409 err = -ENOMEM;
253 goto out_activate_queue; 410 goto out_activate_queue;
@@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
256 vq->priv = info; 413 vq->priv = info;
257 info->vq = vq; 414 info->vq = vq;
258 415
416 /* allocate per-vq vector if available and necessary */
417 if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
418 vector = vp_dev->msix_used_vectors;
419 snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
420 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
421 err = request_irq(vp_dev->msix_entries[vector].vector,
422 vring_interrupt, 0,
423 vp_dev->msix_names[vector], vq);
424 if (err)
425 goto out_request_irq;
426 info->vector = vector;
427 ++vp_dev->msix_used_vectors;
428 } else
429 vector = VP_MSIX_VQ_VECTOR;
430
431 if (callback && vp_dev->msix_enabled) {
432 iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
433 vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
434 if (vector == VIRTIO_MSI_NO_VECTOR) {
435 err = -EBUSY;
436 goto out_assign;
437 }
438 }
439
259 spin_lock_irqsave(&vp_dev->lock, flags); 440 spin_lock_irqsave(&vp_dev->lock, flags);
260 list_add(&info->node, &vp_dev->virtqueues); 441 list_add(&info->node, &vp_dev->virtqueues);
261 spin_unlock_irqrestore(&vp_dev->lock, flags); 442 spin_unlock_irqrestore(&vp_dev->lock, flags);
262 443
263 return vq; 444 return vq;
264 445
446out_assign:
447 if (info->vector != VIRTIO_MSI_NO_VECTOR) {
448 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
449 --vp_dev->msix_used_vectors;
450 }
451out_request_irq:
452 vring_del_virtqueue(vq);
265out_activate_queue: 453out_activate_queue:
266 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 454 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
267 free_pages_exact(info->queue, size); 455 free_pages_exact(info->queue, size);
@@ -270,21 +458,27 @@ out_info:
270 return ERR_PTR(err); 458 return ERR_PTR(err);
271} 459}
272 460
273/* the config->del_vq() implementation */
274static void vp_del_vq(struct virtqueue *vq) 461static void vp_del_vq(struct virtqueue *vq)
275{ 462{
276 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 463 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
277 struct virtio_pci_vq_info *info = vq->priv; 464 struct virtio_pci_vq_info *info = vq->priv;
278 unsigned long flags, size; 465 unsigned long size;
279 466
280 spin_lock_irqsave(&vp_dev->lock, flags); 467 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
281 list_del(&info->node); 468
282 spin_unlock_irqrestore(&vp_dev->lock, flags); 469 if (info->vector != VIRTIO_MSI_NO_VECTOR)
470 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
471
472 if (vp_dev->msix_enabled) {
473 iowrite16(VIRTIO_MSI_NO_VECTOR,
474 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
475 /* Flush the write out to device */
476 ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
477 }
283 478
284 vring_del_virtqueue(vq); 479 vring_del_virtqueue(vq);
285 480
286 /* Select and deactivate the queue */ 481 /* Select and deactivate the queue */
287 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
288 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 482 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
289 483
290 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); 484 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq)
292 kfree(info); 486 kfree(info);
293} 487}
294 488
489/* the config->del_vqs() implementation */
490static void vp_del_vqs(struct virtio_device *vdev)
491{
492 struct virtqueue *vq, *n;
493
494 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
495 vp_del_vq(vq);
496
497 vp_free_vectors(vdev);
498}
499
500/* the config->find_vqs() implementation */
501static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
502 struct virtqueue *vqs[],
503 vq_callback_t *callbacks[],
504 const char *names[])
505{
506 int vectors = 0;
507 int i, err;
508
509 /* How many vectors would we like? */
510 for (i = 0; i < nvqs; ++i)
511 if (callbacks[i])
512 ++vectors;
513
514 err = vp_request_vectors(vdev, vectors);
515 if (err)
516 goto error_request;
517
518 for (i = 0; i < nvqs; ++i) {
519 vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
520 if (IS_ERR(vqs[i]))
521 goto error_find;
522 }
523 return 0;
524
525error_find:
526 vp_del_vqs(vdev);
527
528error_request:
529 return PTR_ERR(vqs[i]);
530}
531
295static struct virtio_config_ops virtio_pci_config_ops = { 532static struct virtio_config_ops virtio_pci_config_ops = {
296 .get = vp_get, 533 .get = vp_get,
297 .set = vp_set, 534 .set = vp_set,
298 .get_status = vp_get_status, 535 .get_status = vp_get_status,
299 .set_status = vp_set_status, 536 .set_status = vp_set_status,
300 .reset = vp_reset, 537 .reset = vp_reset,
301 .find_vq = vp_find_vq, 538 .find_vqs = vp_find_vqs,
302 .del_vq = vp_del_vq, 539 .del_vqs = vp_del_vqs,
303 .get_features = vp_get_features, 540 .get_features = vp_get_features,
304 .finalize_features = vp_finalize_features, 541 .finalize_features = vp_finalize_features,
305}; 542};
@@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
310 struct virtio_pci_device *vp_dev = to_vp_device(dev); 547 struct virtio_pci_device *vp_dev = to_vp_device(dev);
311 struct pci_dev *pci_dev = vp_dev->pci_dev; 548 struct pci_dev *pci_dev = vp_dev->pci_dev;
312 549
313 free_irq(pci_dev->irq, vp_dev); 550 vp_del_vqs(dev);
314 pci_set_drvdata(pci_dev, NULL); 551 pci_set_drvdata(pci_dev, NULL);
315 pci_iounmap(pci_dev, vp_dev->ioaddr); 552 pci_iounmap(pci_dev, vp_dev->ioaddr);
316 pci_release_regions(pci_dev); 553 pci_release_regions(pci_dev);
@@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
369 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 606 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
370 vp_dev->vdev.id.device = pci_dev->subsystem_device; 607 vp_dev->vdev.id.device = pci_dev->subsystem_device;
371 608
372 /* register a handler for the queue with the PCI device's interrupt */
373 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
374 dev_name(&vp_dev->vdev.dev), vp_dev);
375 if (err)
376 goto out_set_drvdata;
377
378 /* finally register the virtio device */ 609 /* finally register the virtio device */
379 err = register_virtio_device(&vp_dev->vdev); 610 err = register_virtio_device(&vp_dev->vdev);
380 if (err) 611 if (err)
381 goto out_req_irq; 612 goto out_set_drvdata;
382 613
383 return 0; 614 return 0;
384 615
385out_req_irq:
386 free_irq(pci_dev->irq, vp_dev);
387out_set_drvdata: 616out_set_drvdata:
388 pci_set_drvdata(pci_dev, NULL); 617 pci_set_drvdata(pci_dev, NULL);
389 pci_iounmap(pci_dev, vp_dev->ioaddr); 618 pci_iounmap(pci_dev, vp_dev->ioaddr);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9bb..a882f2606515 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,21 +23,30 @@
23 23
24#ifdef DEBUG 24#ifdef DEBUG
25/* For development, we want to crash whenever the ring is screwed. */ 25/* For development, we want to crash whenever the ring is screwed. */
26#define BAD_RING(_vq, fmt...) \ 26#define BAD_RING(_vq, fmt, args...) \
27 do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) 27 do { \
28 dev_err(&(_vq)->vq.vdev->dev, \
29 "%s:"fmt, (_vq)->vq.name, ##args); \
30 BUG(); \
31 } while (0)
28/* Caller is supposed to guarantee no reentry. */ 32/* Caller is supposed to guarantee no reentry. */
29#define START_USE(_vq) \ 33#define START_USE(_vq) \
30 do { \ 34 do { \
31 if ((_vq)->in_use) \ 35 if ((_vq)->in_use) \
32 panic("in_use = %i\n", (_vq)->in_use); \ 36 panic("%s:in_use = %i\n", \
37 (_vq)->vq.name, (_vq)->in_use); \
33 (_vq)->in_use = __LINE__; \ 38 (_vq)->in_use = __LINE__; \
34 mb(); \ 39 mb(); \
35 } while(0) 40 } while (0)
36#define END_USE(_vq) \ 41#define END_USE(_vq) \
37 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) 42 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
38#else 43#else
39#define BAD_RING(_vq, fmt...) \ 44#define BAD_RING(_vq, fmt, args...) \
40 do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) 45 do { \
46 dev_err(&_vq->vq.vdev->dev, \
47 "%s:"fmt, (_vq)->vq.name, ##args); \
48 (_vq)->broken = true; \
49 } while (0)
41#define START_USE(vq) 50#define START_USE(vq)
42#define END_USE(vq) 51#define END_USE(vq)
43#endif 52#endif
@@ -52,6 +61,9 @@ struct vring_virtqueue
52 /* Other side has made a mess, don't try any more. */ 61 /* Other side has made a mess, don't try any more. */
53 bool broken; 62 bool broken;
54 63
64 /* Host supports indirect buffers */
65 bool indirect;
66
55 /* Number of free buffers */ 67 /* Number of free buffers */
56 unsigned int num_free; 68 unsigned int num_free;
57 /* Head of free buffer list. */ 69 /* Head of free buffer list. */
@@ -76,6 +88,55 @@ struct vring_virtqueue
76 88
77#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) 89#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
78 90
91/* Set up an indirect table of descriptors and add it to the queue. */
92static int vring_add_indirect(struct vring_virtqueue *vq,
93 struct scatterlist sg[],
94 unsigned int out,
95 unsigned int in)
96{
97 struct vring_desc *desc;
98 unsigned head;
99 int i;
100
101 desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
102 if (!desc)
103 return vq->vring.num;
104
105 /* Transfer entries from the sg list into the indirect page */
106 for (i = 0; i < out; i++) {
107 desc[i].flags = VRING_DESC_F_NEXT;
108 desc[i].addr = sg_phys(sg);
109 desc[i].len = sg->length;
110 desc[i].next = i+1;
111 sg++;
112 }
113 for (; i < (out + in); i++) {
114 desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
115 desc[i].addr = sg_phys(sg);
116 desc[i].len = sg->length;
117 desc[i].next = i+1;
118 sg++;
119 }
120
121 /* Last one doesn't continue. */
122 desc[i-1].flags &= ~VRING_DESC_F_NEXT;
123 desc[i-1].next = 0;
124
125 /* We're about to use a buffer */
126 vq->num_free--;
127
128 /* Use a single buffer which doesn't continue */
129 head = vq->free_head;
130 vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
131 vq->vring.desc[head].addr = virt_to_phys(desc);
132 vq->vring.desc[head].len = i * sizeof(struct vring_desc);
133
134 /* Update free pointer */
135 vq->free_head = vq->vring.desc[head].next;
136
137 return head;
138}
139
79static int vring_add_buf(struct virtqueue *_vq, 140static int vring_add_buf(struct virtqueue *_vq,
80 struct scatterlist sg[], 141 struct scatterlist sg[],
81 unsigned int out, 142 unsigned int out,
@@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
85 struct vring_virtqueue *vq = to_vvq(_vq); 146 struct vring_virtqueue *vq = to_vvq(_vq);
86 unsigned int i, avail, head, uninitialized_var(prev); 147 unsigned int i, avail, head, uninitialized_var(prev);
87 148
149 START_USE(vq);
150
88 BUG_ON(data == NULL); 151 BUG_ON(data == NULL);
152
153 /* If the host supports indirect descriptor tables, and we have multiple
154 * buffers, then go indirect. FIXME: tune this threshold */
155 if (vq->indirect && (out + in) > 1 && vq->num_free) {
156 head = vring_add_indirect(vq, sg, out, in);
157 if (head != vq->vring.num)
158 goto add_head;
159 }
160
89 BUG_ON(out + in > vq->vring.num); 161 BUG_ON(out + in > vq->vring.num);
90 BUG_ON(out + in == 0); 162 BUG_ON(out + in == 0);
91 163
92 START_USE(vq);
93
94 if (vq->num_free < out + in) { 164 if (vq->num_free < out + in) {
95 pr_debug("Can't add buf len %i - avail = %i\n", 165 pr_debug("Can't add buf len %i - avail = %i\n",
96 out + in, vq->num_free); 166 out + in, vq->num_free);
@@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
127 /* Update free pointer */ 197 /* Update free pointer */
128 vq->free_head = i; 198 vq->free_head = i;
129 199
200add_head:
130 /* Set token. */ 201 /* Set token. */
131 vq->data[head] = data; 202 vq->data[head] = data;
132 203
@@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
170 241
171 /* Put back on free list: find end */ 242 /* Put back on free list: find end */
172 i = head; 243 i = head;
244
245 /* Free the indirect table */
246 if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
247 kfree(phys_to_virt(vq->vring.desc[i].addr));
248
173 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { 249 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
174 i = vq->vring.desc[i].next; 250 i = vq->vring.desc[i].next;
175 vq->num_free++; 251 vq->num_free++;
@@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
284 struct virtio_device *vdev, 360 struct virtio_device *vdev,
285 void *pages, 361 void *pages,
286 void (*notify)(struct virtqueue *), 362 void (*notify)(struct virtqueue *),
287 void (*callback)(struct virtqueue *)) 363 void (*callback)(struct virtqueue *),
364 const char *name)
288{ 365{
289 struct vring_virtqueue *vq; 366 struct vring_virtqueue *vq;
290 unsigned int i; 367 unsigned int i;
@@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
303 vq->vq.callback = callback; 380 vq->vq.callback = callback;
304 vq->vq.vdev = vdev; 381 vq->vq.vdev = vdev;
305 vq->vq.vq_ops = &vring_vq_ops; 382 vq->vq.vq_ops = &vring_vq_ops;
383 vq->vq.name = name;
306 vq->notify = notify; 384 vq->notify = notify;
307 vq->broken = false; 385 vq->broken = false;
308 vq->last_used_idx = 0; 386 vq->last_used_idx = 0;
309 vq->num_added = 0; 387 vq->num_added = 0;
388 list_add_tail(&vq->vq.list, &vdev->vqs);
310#ifdef DEBUG 389#ifdef DEBUG
311 vq->in_use = false; 390 vq->in_use = false;
312#endif 391#endif
313 392
393 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
394
314 /* No callback? Tell other side not to bother us. */ 395 /* No callback? Tell other side not to bother us. */
315 if (!callback) 396 if (!callback)
316 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 397 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
@@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
327 408
328void vring_del_virtqueue(struct virtqueue *vq) 409void vring_del_virtqueue(struct virtqueue *vq)
329{ 410{
411 list_del(&vq->list);
330 kfree(to_vvq(vq)); 412 kfree(to_vvq(vq));
331} 413}
332EXPORT_SYMBOL_GPL(vring_del_virtqueue); 414EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
338 420
339 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { 421 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
340 switch (i) { 422 switch (i) {
423 case VIRTIO_RING_F_INDIRECT_DESC:
424 break;
341 default: 425 default:
342 /* We don't understand this bit. */ 426 /* We don't understand this bit. */
343 clear_bit(i, vdev->features); 427 clear_bit(i, vdev->features);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index fddc2025dece..10d03d7931c4 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -43,7 +43,7 @@ static int xen_suspend(void *data)
43 if (err) { 43 if (err) {
44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
45 err); 45 err);
46 device_power_up(PMSG_RESUME); 46 dpm_resume_noirq(PMSG_RESUME);
47 return err; 47 return err;
48 } 48 }
49 49
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
69 } 69 }
70 70
71 sysdev_resume(); 71 sysdev_resume();
72 device_power_up(PMSG_RESUME); 72 dpm_resume_noirq(PMSG_RESUME);
73 73
74 return 0; 74 return 0;
75} 75}
@@ -92,18 +92,18 @@ static void do_suspend(void)
92 } 92 }
93#endif 93#endif
94 94
95 err = device_suspend(PMSG_SUSPEND); 95 err = dpm_suspend_start(PMSG_SUSPEND);
96 if (err) { 96 if (err) {
97 printk(KERN_ERR "xen suspend: device_suspend %d\n", err); 97 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
98 goto out; 98 goto out;
99 } 99 }
100 100
101 printk(KERN_DEBUG "suspending xenstore...\n"); 101 printk(KERN_DEBUG "suspending xenstore...\n");
102 xs_suspend(); 102 xs_suspend();
103 103
104 err = device_power_down(PMSG_SUSPEND); 104 err = dpm_suspend_noirq(PMSG_SUSPEND);
105 if (err) { 105 if (err) {
106 printk(KERN_ERR "device_power_down failed: %d\n", err); 106 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
107 goto resume_devices; 107 goto resume_devices;
108 } 108 }
109 109
@@ -119,10 +119,10 @@ static void do_suspend(void)
119 } else 119 } else
120 xs_suspend_cancel(); 120 xs_suspend_cancel();
121 121
122 device_power_up(PMSG_RESUME); 122 dpm_resume_noirq(PMSG_RESUME);
123 123
124resume_devices: 124resume_devices:
125 device_resume(PMSG_RESUME); 125 dpm_resume_end(PMSG_RESUME);
126 126
127 /* Make sure timer events get retriggered on all CPUs */ 127 /* Make sure timer events get retriggered on all CPUs */
128 clock_was_set(); 128 clock_was_set();
diff --git a/fs/Kconfig b/fs/Kconfig
index 9f7270f36b2a..525da2e8f73b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -62,6 +62,16 @@ source "fs/autofs/Kconfig"
62source "fs/autofs4/Kconfig" 62source "fs/autofs4/Kconfig"
63source "fs/fuse/Kconfig" 63source "fs/fuse/Kconfig"
64 64
65config CUSE
 66	tristate "Character device in Userspace support"
67 depends on FUSE_FS
68 help
69 This FUSE extension allows character devices to be
70 implemented in userspace.
71
 72	  If you want to develop or use a userspace character device
73 based on CUSE, answer Y or M.
74
65config GENERIC_ACL 75config GENERIC_ACL
66 bool 76 bool
67 select FS_POSIX_ACL 77 select FS_POSIX_ACL
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 858fba14aaa6..c4dfa1dcc86f 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -49,7 +49,8 @@ static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
49 spin_unlock(&ls->ls_recover_list_lock); 49 spin_unlock(&ls->ls_recover_list_lock);
50 50
51 if (!found) 51 if (!found)
52 de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_KERNEL); 52 de = kzalloc(sizeof(struct dlm_direntry) + len,
53 ls->ls_allocation);
53 return de; 54 return de;
54} 55}
55 56
@@ -211,7 +212,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
211 212
212 dlm_dir_clear(ls); 213 dlm_dir_clear(ls);
213 214
214 last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_KERNEL); 215 last_name = kmalloc(DLM_RESNAME_MAXLEN, ls->ls_allocation);
215 if (!last_name) 216 if (!last_name)
216 goto out; 217 goto out;
217 218
@@ -322,7 +323,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
322 if (namelen > DLM_RESNAME_MAXLEN) 323 if (namelen > DLM_RESNAME_MAXLEN)
323 return -EINVAL; 324 return -EINVAL;
324 325
325 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); 326 de = kzalloc(sizeof(struct dlm_direntry) + namelen, ls->ls_allocation);
326 if (!de) 327 if (!de)
327 return -ENOMEM; 328 return -ENOMEM;
328 329
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index cd8e2df3c295..d489fcc86713 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -384,7 +384,7 @@ static void threads_stop(void)
384 dlm_astd_stop(); 384 dlm_astd_stop();
385} 385}
386 386
387static int new_lockspace(char *name, int namelen, void **lockspace, 387static int new_lockspace(const char *name, int namelen, void **lockspace,
388 uint32_t flags, int lvblen) 388 uint32_t flags, int lvblen)
389{ 389{
390 struct dlm_ls *ls; 390 struct dlm_ls *ls;
@@ -419,16 +419,14 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
419 break; 419 break;
420 } 420 }
421 ls->ls_create_count++; 421 ls->ls_create_count++;
422 module_put(THIS_MODULE); 422 *lockspace = ls;
423 error = 1; /* not an error, return 0 */ 423 error = 1;
424 break; 424 break;
425 } 425 }
426 spin_unlock(&lslist_lock); 426 spin_unlock(&lslist_lock);
427 427
428 if (error < 0)
429 goto out;
430 if (error) 428 if (error)
431 goto ret_zero; 429 goto out;
432 430
433 error = -ENOMEM; 431 error = -ENOMEM;
434 432
@@ -583,7 +581,6 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
583 dlm_create_debug_file(ls); 581 dlm_create_debug_file(ls);
584 582
585 log_debug(ls, "join complete"); 583 log_debug(ls, "join complete");
586 ret_zero:
587 *lockspace = ls; 584 *lockspace = ls;
588 return 0; 585 return 0;
589 586
@@ -614,7 +611,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
614 return error; 611 return error;
615} 612}
616 613
617int dlm_new_lockspace(char *name, int namelen, void **lockspace, 614int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
618 uint32_t flags, int lvblen) 615 uint32_t flags, int lvblen)
619{ 616{
620 int error = 0; 617 int error = 0;
@@ -628,7 +625,9 @@ int dlm_new_lockspace(char *name, int namelen, void **lockspace,
628 error = new_lockspace(name, namelen, lockspace, flags, lvblen); 625 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
629 if (!error) 626 if (!error)
630 ls_count++; 627 ls_count++;
631 else if (!ls_count) 628 if (error > 0)
629 error = 0;
630 if (!ls_count)
632 threads_stop(); 631 threads_stop();
633 out: 632 out:
634 mutex_unlock(&ls_lock); 633 mutex_unlock(&ls_lock);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 609108a83267..cdb580a9c7a2 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -309,6 +309,20 @@ static void lowcomms_state_change(struct sock *sk)
309 lowcomms_write_space(sk); 309 lowcomms_write_space(sk);
310} 310}
311 311
312int dlm_lowcomms_connect_node(int nodeid)
313{
314 struct connection *con;
315
316 if (nodeid == dlm_our_nodeid())
317 return 0;
318
319 con = nodeid2con(nodeid, GFP_NOFS);
320 if (!con)
321 return -ENOMEM;
322 lowcomms_connect_sock(con);
323 return 0;
324}
325
312/* Make a socket active */ 326/* Make a socket active */
313static int add_sock(struct socket *sock, struct connection *con) 327static int add_sock(struct socket *sock, struct connection *con)
314{ 328{
@@ -486,7 +500,7 @@ static void process_sctp_notification(struct connection *con,
486 return; 500 return;
487 } 501 }
488 502
489 new_con = nodeid2con(nodeid, GFP_KERNEL); 503 new_con = nodeid2con(nodeid, GFP_NOFS);
490 if (!new_con) 504 if (!new_con)
491 return; 505 return;
492 506
@@ -722,7 +736,7 @@ static int tcp_accept_from_sock(struct connection *con)
722 * the same time and the connections cross on the wire. 736 * the same time and the connections cross on the wire.
723 * In this case we store the incoming one in "othercon" 737 * In this case we store the incoming one in "othercon"
724 */ 738 */
725 newcon = nodeid2con(nodeid, GFP_KERNEL); 739 newcon = nodeid2con(nodeid, GFP_NOFS);
726 if (!newcon) { 740 if (!newcon) {
727 result = -ENOMEM; 741 result = -ENOMEM;
728 goto accept_err; 742 goto accept_err;
@@ -732,7 +746,7 @@ static int tcp_accept_from_sock(struct connection *con)
732 struct connection *othercon = newcon->othercon; 746 struct connection *othercon = newcon->othercon;
733 747
734 if (!othercon) { 748 if (!othercon) {
735 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL); 749 othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
736 if (!othercon) { 750 if (!othercon) {
737 log_print("failed to allocate incoming socket"); 751 log_print("failed to allocate incoming socket");
738 mutex_unlock(&newcon->sock_mutex); 752 mutex_unlock(&newcon->sock_mutex);
@@ -1421,7 +1435,7 @@ static int work_start(void)
1421static void stop_conn(struct connection *con) 1435static void stop_conn(struct connection *con)
1422{ 1436{
1423 con->flags |= 0x0F; 1437 con->flags |= 0x0F;
1424 if (con->sock) 1438 if (con->sock && con->sock->sk)
1425 con->sock->sk->sk_user_data = NULL; 1439 con->sock->sk->sk_user_data = NULL;
1426} 1440}
1427 1441
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index a9a9618c0d3f..1311e6426287 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -19,6 +19,7 @@ void dlm_lowcomms_stop(void);
19int dlm_lowcomms_close(int nodeid); 19int dlm_lowcomms_close(int nodeid);
20void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc); 20void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
21void dlm_lowcomms_commit_buffer(void *mh); 21void dlm_lowcomms_commit_buffer(void *mh);
22int dlm_lowcomms_connect_node(int nodeid);
22 23
23#endif /* __LOWCOMMS_DOT_H__ */ 24#endif /* __LOWCOMMS_DOT_H__ */
24 25
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 26133f05ae3a..b128775913b2 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. 4** Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved.
5** 5**
6** This copyrighted material is made available to anyone wishing to use, 6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions 7** modify, copy, or redistribute it subject to the terms and conditions
@@ -17,6 +17,7 @@
17#include "recover.h" 17#include "recover.h"
18#include "rcom.h" 18#include "rcom.h"
19#include "config.h" 19#include "config.h"
20#include "lowcomms.h"
20 21
21static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new) 22static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
22{ 23{
@@ -45,9 +46,9 @@ static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
45static int dlm_add_member(struct dlm_ls *ls, int nodeid) 46static int dlm_add_member(struct dlm_ls *ls, int nodeid)
46{ 47{
47 struct dlm_member *memb; 48 struct dlm_member *memb;
48 int w; 49 int w, error;
49 50
50 memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL); 51 memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
51 if (!memb) 52 if (!memb)
52 return -ENOMEM; 53 return -ENOMEM;
53 54
@@ -57,6 +58,12 @@ static int dlm_add_member(struct dlm_ls *ls, int nodeid)
57 return w; 58 return w;
58 } 59 }
59 60
61 error = dlm_lowcomms_connect_node(nodeid);
62 if (error < 0) {
63 kfree(memb);
64 return error;
65 }
66
60 memb->nodeid = nodeid; 67 memb->nodeid = nodeid;
61 memb->weight = w; 68 memb->weight = w;
62 add_ordered_member(ls, memb); 69 add_ordered_member(ls, memb);
@@ -136,7 +143,7 @@ static void make_member_array(struct dlm_ls *ls)
136 143
137 ls->ls_total_weight = total; 144 ls->ls_total_weight = total;
138 145
139 array = kmalloc(sizeof(int) * total, GFP_KERNEL); 146 array = kmalloc(sizeof(int) * total, ls->ls_allocation);
140 if (!array) 147 if (!array)
141 return; 148 return;
142 149
@@ -219,7 +226,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
219 continue; 226 continue;
220 log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]); 227 log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
221 228
222 memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL); 229 memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
223 if (!memb) 230 if (!memb)
224 return -ENOMEM; 231 return -ENOMEM;
225 memb->nodeid = rv->new[i]; 232 memb->nodeid = rv->new[i];
@@ -334,7 +341,7 @@ int dlm_ls_start(struct dlm_ls *ls)
334 int *ids = NULL, *new = NULL; 341 int *ids = NULL, *new = NULL;
335 int error, ids_count = 0, new_count = 0; 342 int error, ids_count = 0, new_count = 0;
336 343
337 rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL); 344 rv = kzalloc(sizeof(struct dlm_recover), ls->ls_allocation);
338 if (!rv) 345 if (!rv)
339 return -ENOMEM; 346 return -ENOMEM;
340 347
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index daa4183fbb84..7a2307c08911 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -35,7 +35,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
35 struct rq_entry *e; 35 struct rq_entry *e;
36 int length = ms->m_header.h_length - sizeof(struct dlm_message); 36 int length = ms->m_header.h_length - sizeof(struct dlm_message);
37 37
38 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 38 e = kmalloc(sizeof(struct rq_entry) + length, ls->ls_allocation);
39 if (!e) { 39 if (!e) {
40 log_print("dlm_add_requestqueue: out of memory len %d", length); 40 log_print("dlm_add_requestqueue: out of memory len %d", length);
41 return; 41 return;
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2a701d593d35..3f0e1974abdc 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -16,6 +16,7 @@
16#include <linux/anon_inodes.h> 16#include <linux/anon_inodes.h>
17#include <linux/eventfd.h> 17#include <linux/eventfd.h>
18#include <linux/syscalls.h> 18#include <linux/syscalls.h>
19#include <linux/module.h>
19 20
20struct eventfd_ctx { 21struct eventfd_ctx {
21 wait_queue_head_t wqh; 22 wait_queue_head_t wqh;
@@ -56,6 +57,7 @@ int eventfd_signal(struct file *file, int n)
56 57
57 return n; 58 return n;
58} 59}
60EXPORT_SYMBOL_GPL(eventfd_signal);
59 61
60static int eventfd_release(struct inode *inode, struct file *file) 62static int eventfd_release(struct inode *inode, struct file *file)
61{ 63{
@@ -197,6 +199,7 @@ struct file *eventfd_fget(int fd)
197 199
198 return file; 200 return file;
199} 201}
202EXPORT_SYMBOL_GPL(eventfd_fget);
200 203
201SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) 204SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
202{ 205{
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index b1512c4bb8c7..24667eedc023 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -175,10 +175,4 @@ int exofs_async_op(struct osd_request *or,
175 175
176int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr); 176int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr);
177 177
178int osd_req_read_kern(struct osd_request *or,
179 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
180
181int osd_req_write_kern(struct osd_request *or,
182 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
183
184#endif /*ifndef __EXOFS_COM_H__*/ 178#endif /*ifndef __EXOFS_COM_H__*/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ba8d9fab4693..77d0a295eb1c 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -59,10 +59,9 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
59 struct inode *inode) 59 struct inode *inode)
60{ 60{
61 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; 61 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
62 struct request_queue *req_q = sbi->s_dev->scsi_device->request_queue;
63 62
64 pcol->sbi = sbi; 63 pcol->sbi = sbi;
65 pcol->req_q = req_q; 64 pcol->req_q = osd_request_queue(sbi->s_dev);
66 pcol->inode = inode; 65 pcol->inode = inode;
67 pcol->expected_pages = expected_pages; 66 pcol->expected_pages = expected_pages;
68 67
@@ -266,7 +265,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
266 goto err; 265 goto err;
267 } 266 }
268 267
269 osd_req_read(or, &obj, pcol->bio, i_start); 268 osd_req_read(or, &obj, i_start, pcol->bio, pcol->length);
270 269
271 if (is_sync) { 270 if (is_sync) {
272 exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred); 271 exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred);
@@ -522,7 +521,8 @@ static int write_exec(struct page_collect *pcol)
522 521
523 *pcol_copy = *pcol; 522 *pcol_copy = *pcol;
524 523
525 osd_req_write(or, &obj, pcol_copy->bio, i_start); 524 pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
525 osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length);
526 ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred); 526 ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred);
527 if (unlikely(ret)) { 527 if (unlikely(ret)) {
528 EXOFS_ERR("write_exec: exofs_async_op() Faild\n"); 528 EXOFS_ERR("write_exec: exofs_async_op() Faild\n");
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index 06ca92672eb5..b3d2ccb87aaa 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -125,29 +125,3 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
125 125
126 return -EIO; 126 return -EIO;
127} 127}
128
129int osd_req_read_kern(struct osd_request *or,
130 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
131{
132 struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
133 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
134
135 if (!bio)
136 return -ENOMEM;
137
138 osd_req_read(or, obj, bio, offset);
139 return 0;
140}
141
142int osd_req_write_kern(struct osd_request *or,
143 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
144{
145 struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
146 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
147
148 if (!bio)
149 return -ENOMEM;
150
151 osd_req_write(or, obj, bio, offset);
152 return 0;
153}
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 72437065f6ad..e95eeb445e58 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -3,5 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_FUSE_FS) += fuse.o 5obj-$(CONFIG_FUSE_FS) += fuse.o
6obj-$(CONFIG_CUSE) += cuse.o
6 7
7fuse-objs := dev.o dir.o file.o inode.o control.o 8fuse-objs := dev.o dir.o file.o inode.o control.o
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
new file mode 100644
index 000000000000..de792dcf3274
--- /dev/null
+++ b/fs/fuse/cuse.c
@@ -0,0 +1,610 @@
1/*
2 * CUSE: Character device in Userspace
3 *
4 * Copyright (C) 2008-2009 SUSE Linux Products GmbH
5 * Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * CUSE enables character devices to be implemented from userland much
10 * like FUSE allows filesystems. On initialization /dev/cuse is
11 * created. By opening the file and replying to the CUSE_INIT request
12 * userland CUSE server can create a character device. After that the
13 * operation is very similar to FUSE.
14 *
15 * A CUSE instance involves the following objects.
16 *
17 * cuse_conn : contains fuse_conn and serves as bonding structure
18 * channel : file handle connected to the userland CUSE server
19 * cdev : the implemented character device
20 * dev : generic device for cdev
21 *
22 * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with
23 * devices, it's called 'channel' to reduce confusion.
24 *
25 * channel determines when the character device dies. When channel is
26 * closed, everything begins to destruct. The cuse_conn is taken off
27 * the lookup table preventing further access from cdev, cdev and
28 * generic device are removed and the base reference of cuse_conn is
29 * put.
30 *
31 * On each open, the matching cuse_conn is looked up and if found an
32 * additional reference is taken which is released when the file is
33 * closed.
34 */
35
36#include <linux/fuse.h>
37#include <linux/cdev.h>
38#include <linux/device.h>
39#include <linux/file.h>
40#include <linux/fs.h>
41#include <linux/kdev_t.h>
42#include <linux/kthread.h>
43#include <linux/list.h>
44#include <linux/magic.h>
45#include <linux/miscdevice.h>
46#include <linux/mutex.h>
47#include <linux/spinlock.h>
48#include <linux/stat.h>
49
50#include "fuse_i.h"
51
52#define CUSE_CONNTBL_LEN 64
53
54struct cuse_conn {
55 struct list_head list; /* linked on cuse_conntbl */
56 struct fuse_conn fc; /* fuse connection */
57 struct cdev *cdev; /* associated character device */
58 struct device *dev; /* device representing @cdev */
59
60 /* init parameters, set once during initialization */
61 bool unrestricted_ioctl;
62};
63
64static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */
65static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
66static struct class *cuse_class;
67
68static struct cuse_conn *fc_to_cc(struct fuse_conn *fc)
69{
70 return container_of(fc, struct cuse_conn, fc);
71}
72
73static struct list_head *cuse_conntbl_head(dev_t devt)
74{
75 return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN];
76}
77
78
79/**************************************************************************
80 * CUSE frontend operations
81 *
82 * These are file operations for the character device.
83 *
84 * On open, CUSE opens a file from the FUSE mnt and stores it to
85 * private_data of the open file. All other ops call FUSE ops on the
86 * FUSE file.
87 */
88
89static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
90 loff_t *ppos)
91{
92 loff_t pos = 0;
93
94 return fuse_direct_io(file, buf, count, &pos, 0);
95}
96
97static ssize_t cuse_write(struct file *file, const char __user *buf,
98 size_t count, loff_t *ppos)
99{
100 loff_t pos = 0;
101 /*
102 * No locking or generic_write_checks(), the server is
103 * responsible for locking and sanity checks.
104 */
105 return fuse_direct_io(file, buf, count, &pos, 1);
106}
107
108static int cuse_open(struct inode *inode, struct file *file)
109{
110 dev_t devt = inode->i_cdev->dev;
111 struct cuse_conn *cc = NULL, *pos;
112 int rc;
113
114 /* look up and get the connection */
115 spin_lock(&cuse_lock);
116 list_for_each_entry(pos, cuse_conntbl_head(devt), list)
117 if (pos->dev->devt == devt) {
118 fuse_conn_get(&pos->fc);
119 cc = pos;
120 break;
121 }
122 spin_unlock(&cuse_lock);
123
124 /* dead? */
125 if (!cc)
126 return -ENODEV;
127
128 /*
129 * Generic permission check is already done against the chrdev
130 * file, proceed to open.
131 */
132 rc = fuse_do_open(&cc->fc, 0, file, 0);
133 if (rc)
134 fuse_conn_put(&cc->fc);
135 return rc;
136}
137
138static int cuse_release(struct inode *inode, struct file *file)
139{
140 struct fuse_file *ff = file->private_data;
141 struct fuse_conn *fc = ff->fc;
142
143 fuse_sync_release(ff, file->f_flags);
144 fuse_conn_put(fc);
145
146 return 0;
147}
148
149static long cuse_file_ioctl(struct file *file, unsigned int cmd,
150 unsigned long arg)
151{
152 struct fuse_file *ff = file->private_data;
153 struct cuse_conn *cc = fc_to_cc(ff->fc);
154 unsigned int flags = 0;
155
156 if (cc->unrestricted_ioctl)
157 flags |= FUSE_IOCTL_UNRESTRICTED;
158
159 return fuse_do_ioctl(file, cmd, arg, flags);
160}
161
162static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd,
163 unsigned long arg)
164{
165 struct fuse_file *ff = file->private_data;
166 struct cuse_conn *cc = fc_to_cc(ff->fc);
167 unsigned int flags = FUSE_IOCTL_COMPAT;
168
169 if (cc->unrestricted_ioctl)
170 flags |= FUSE_IOCTL_UNRESTRICTED;
171
172 return fuse_do_ioctl(file, cmd, arg, flags);
173}
174
175static const struct file_operations cuse_frontend_fops = {
176 .owner = THIS_MODULE,
177 .read = cuse_read,
178 .write = cuse_write,
179 .open = cuse_open,
180 .release = cuse_release,
181 .unlocked_ioctl = cuse_file_ioctl,
182 .compat_ioctl = cuse_file_compat_ioctl,
183 .poll = fuse_file_poll,
184};
185
186
187/**************************************************************************
188 * CUSE channel initialization and destruction
189 */
190
191struct cuse_devinfo {
192 const char *name;
193};
194
195/**
196 * cuse_parse_one - parse one key=value pair
197 * @pp: i/o parameter for the current position
198 * @end: points to one past the end of the packed string
199 * @keyp: out parameter for key
200 * @valp: out parameter for value
201 *
202 * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends
203 * at @end - 1. This function parses one pair and set *@keyp to the
204 * start of the key and *@valp to the start of the value. Note that
205 * the original string is modified such that the key string is
206 * terminated with '\0'. *@pp is updated to point to the next string.
207 *
208 * RETURNS:
209 * 1 on successful parse, 0 on EOF, -errno on failure.
210 */
211static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
212{
213 char *p = *pp;
214 char *key, *val;
215
216 while (p < end && *p == '\0')
217 p++;
218 if (p == end)
219 return 0;
220
221 if (end[-1] != '\0') {
222 printk(KERN_ERR "CUSE: info not properly terminated\n");
223 return -EINVAL;
224 }
225
226 key = val = p;
227 p += strlen(p);
228
229 if (valp) {
230 strsep(&val, "=");
231 if (!val)
232 val = key + strlen(key);
233 key = strstrip(key);
234 val = strstrip(val);
235 } else
236 key = strstrip(key);
237
238 if (!strlen(key)) {
239 printk(KERN_ERR "CUSE: zero length info key specified\n");
240 return -EINVAL;
241 }
242
243 *pp = p;
244 *keyp = key;
245 if (valp)
246 *valp = val;
247
248 return 1;
249}
250
251/**
252 * cuse_parse_dev_info - parse device info
253 * @p: device info string
254 * @len: length of device info string
255 * @devinfo: out parameter for parsed device info
256 *
257 * Parse @p to extract device info and store it into @devinfo. String
258 * pointed to by @p is modified by parsing and @devinfo points into
259 * them, so @p shouldn't be freed while @devinfo is in use.
260 *
261 * RETURNS:
262 * 0 on success, -errno on failure.
263 */
264static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
265{
266 char *end = p + len;
267 char *key, *val;
268 int rc;
269
270 while (true) {
271 rc = cuse_parse_one(&p, end, &key, &val);
272 if (rc < 0)
273 return rc;
274 if (!rc)
275 break;
276 if (strcmp(key, "DEVNAME") == 0)
277 devinfo->name = val;
278 else
279 printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n",
280 key);
281 }
282
283 if (!devinfo->name || !strlen(devinfo->name)) {
284 printk(KERN_ERR "CUSE: DEVNAME unspecified\n");
285 return -EINVAL;
286 }
287
288 return 0;
289}
290
291static void cuse_gendev_release(struct device *dev)
292{
293 kfree(dev);
294}
295
296/**
297 * cuse_process_init_reply - finish initializing CUSE channel
298 *
299 * This function creates the character device and sets up all the
300 * required data structures for it. Please read the comment at the
301 * top of this file for high level overview.
302 */
303static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
304{
305 struct cuse_conn *cc = fc_to_cc(fc);
306 struct cuse_init_out *arg = &req->misc.cuse_init_out;
307 struct page *page = req->pages[0];
308 struct cuse_devinfo devinfo = { };
309 struct device *dev;
310 struct cdev *cdev;
311 dev_t devt;
312 int rc;
313
314 if (req->out.h.error ||
315 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
316 goto err;
317 }
318
319 fc->minor = arg->minor;
320 fc->max_read = max_t(unsigned, arg->max_read, 4096);
321 fc->max_write = max_t(unsigned, arg->max_write, 4096);
322
323 /* parse init reply */
324 cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;
325
326 rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size,
327 &devinfo);
328 if (rc)
329 goto err;
330
331 /* determine and reserve devt */
332 devt = MKDEV(arg->dev_major, arg->dev_minor);
333 if (!MAJOR(devt))
334 rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name);
335 else
336 rc = register_chrdev_region(devt, 1, devinfo.name);
337 if (rc) {
338 printk(KERN_ERR "CUSE: failed to register chrdev region\n");
339 goto err;
340 }
341
342 /* devt determined, create device */
343 rc = -ENOMEM;
344 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
345 if (!dev)
346 goto err_region;
347
348 device_initialize(dev);
349 dev_set_uevent_suppress(dev, 1);
350 dev->class = cuse_class;
351 dev->devt = devt;
352 dev->release = cuse_gendev_release;
353 dev_set_drvdata(dev, cc);
354 dev_set_name(dev, "%s", devinfo.name);
355
356 rc = device_add(dev);
357 if (rc)
358 goto err_device;
359
360 /* register cdev */
361 rc = -ENOMEM;
362 cdev = cdev_alloc();
363 if (!cdev)
364 goto err_device;
365
366 cdev->owner = THIS_MODULE;
367 cdev->ops = &cuse_frontend_fops;
368
369 rc = cdev_add(cdev, devt, 1);
370 if (rc)
371 goto err_cdev;
372
373 cc->dev = dev;
374 cc->cdev = cdev;
375
376 /* make the device available */
377 spin_lock(&cuse_lock);
378 list_add(&cc->list, cuse_conntbl_head(devt));
379 spin_unlock(&cuse_lock);
380
381 /* announce device availability */
382 dev_set_uevent_suppress(dev, 0);
383 kobject_uevent(&dev->kobj, KOBJ_ADD);
384out:
385 __free_page(page);
386 return;
387
388err_cdev:
389 cdev_del(cdev);
390err_device:
391 put_device(dev);
392err_region:
393 unregister_chrdev_region(devt, 1);
394err:
395 fc->conn_error = 1;
396 goto out;
397}
398
399static int cuse_send_init(struct cuse_conn *cc)
400{
401 int rc;
402 struct fuse_req *req;
403 struct page *page;
404 struct fuse_conn *fc = &cc->fc;
405 struct cuse_init_in *arg;
406
407 BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
408
409 req = fuse_get_req(fc);
410 if (IS_ERR(req)) {
411 rc = PTR_ERR(req);
412 goto err;
413 }
414
415 rc = -ENOMEM;
416 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
417 if (!page)
418 goto err_put_req;
419
420 arg = &req->misc.cuse_init_in;
421 arg->major = FUSE_KERNEL_VERSION;
422 arg->minor = FUSE_KERNEL_MINOR_VERSION;
423 arg->flags |= CUSE_UNRESTRICTED_IOCTL;
424 req->in.h.opcode = CUSE_INIT;
425 req->in.numargs = 1;
426 req->in.args[0].size = sizeof(struct cuse_init_in);
427 req->in.args[0].value = arg;
428 req->out.numargs = 2;
429 req->out.args[0].size = sizeof(struct cuse_init_out);
430 req->out.args[0].value = &req->misc.cuse_init_out;
431 req->out.args[1].size = CUSE_INIT_INFO_MAX;
432 req->out.argvar = 1;
433 req->out.argpages = 1;
434 req->pages[0] = page;
435 req->num_pages = 1;
436 req->end = cuse_process_init_reply;
437 fuse_request_send_background(fc, req);
438
439 return 0;
440
441err_put_req:
442 fuse_put_request(fc, req);
443err:
444 return rc;
445}
446
447static void cuse_fc_release(struct fuse_conn *fc)
448{
449 struct cuse_conn *cc = fc_to_cc(fc);
450 kfree(cc);
451}
452
453/**
454 * cuse_channel_open - open method for /dev/cuse
455 * @inode: inode for /dev/cuse
456 * @file: file struct being opened
457 *
458 * Userland CUSE server can create a CUSE device by opening /dev/cuse
459 * and replying to the initilaization request kernel sends. This
460 * function is responsible for handling CUSE device initialization.
461 * Because the fd opened by this function is used during
462 * initialization, this function only creates cuse_conn and sends
463 * init. The rest is delegated to a kthread.
464 *
465 * RETURNS:
466 * 0 on success, -errno on failure.
467 */
468static int cuse_channel_open(struct inode *inode, struct file *file)
469{
470 struct cuse_conn *cc;
471 int rc;
472
473 /* set up cuse_conn */
474 cc = kzalloc(sizeof(*cc), GFP_KERNEL);
475 if (!cc)
476 return -ENOMEM;
477
478 fuse_conn_init(&cc->fc);
479
480 INIT_LIST_HEAD(&cc->list);
481 cc->fc.release = cuse_fc_release;
482
483 cc->fc.connected = 1;
484 cc->fc.blocked = 0;
485 rc = cuse_send_init(cc);
486 if (rc) {
487 fuse_conn_put(&cc->fc);
488 return rc;
489 }
490 file->private_data = &cc->fc; /* channel owns base reference to cc */
491
492 return 0;
493}
494
495/**
496 * cuse_channel_release - release method for /dev/cuse
497 * @inode: inode for /dev/cuse
498 * @file: file struct being closed
499 *
500 * Disconnect the channel, deregister CUSE device and initiate
501 * destruction by putting the default reference.
502 *
503 * RETURNS:
504 * 0 on success, -errno on failure.
505 */
506static int cuse_channel_release(struct inode *inode, struct file *file)
507{
508 struct cuse_conn *cc = fc_to_cc(file->private_data);
509 int rc;
510
511 /* remove from the conntbl, no more access from this point on */
512 spin_lock(&cuse_lock);
513 list_del_init(&cc->list);
514 spin_unlock(&cuse_lock);
515
516 /* remove device */
517 if (cc->dev)
518 device_unregister(cc->dev);
519 if (cc->cdev) {
520 unregister_chrdev_region(cc->cdev->dev, 1);
521 cdev_del(cc->cdev);
522 }
523
524 /* kill connection and shutdown channel */
525 fuse_conn_kill(&cc->fc);
526 rc = fuse_dev_release(inode, file); /* puts the base reference */
527
528 return rc;
529}
530
531static struct file_operations cuse_channel_fops; /* initialized during init */
532
533
534/**************************************************************************
535 * Misc stuff and module initializatiion
536 *
537 * CUSE exports the same set of attributes to sysfs as fusectl.
538 */
539
540static ssize_t cuse_class_waiting_show(struct device *dev,
541 struct device_attribute *attr, char *buf)
542{
543 struct cuse_conn *cc = dev_get_drvdata(dev);
544
545 return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting));
546}
547
548static ssize_t cuse_class_abort_store(struct device *dev,
549 struct device_attribute *attr,
550 const char *buf, size_t count)
551{
552 struct cuse_conn *cc = dev_get_drvdata(dev);
553
554 fuse_abort_conn(&cc->fc);
555 return count;
556}
557
558static struct device_attribute cuse_class_dev_attrs[] = {
559 __ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL),
560 __ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store),
561 { }
562};
563
564static struct miscdevice cuse_miscdev = {
565 .minor = MISC_DYNAMIC_MINOR,
566 .name = "cuse",
567 .fops = &cuse_channel_fops,
568};
569
570static int __init cuse_init(void)
571{
572 int i, rc;
573
574 /* init conntbl */
575 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
576 INIT_LIST_HEAD(&cuse_conntbl[i]);
577
578 /* inherit and extend fuse_dev_operations */
579 cuse_channel_fops = fuse_dev_operations;
580 cuse_channel_fops.owner = THIS_MODULE;
581 cuse_channel_fops.open = cuse_channel_open;
582 cuse_channel_fops.release = cuse_channel_release;
583
584 cuse_class = class_create(THIS_MODULE, "cuse");
585 if (IS_ERR(cuse_class))
586 return PTR_ERR(cuse_class);
587
588 cuse_class->dev_attrs = cuse_class_dev_attrs;
589
590 rc = misc_register(&cuse_miscdev);
591 if (rc) {
592 class_destroy(cuse_class);
593 return rc;
594 }
595
596 return 0;
597}
598
599static void __exit cuse_exit(void)
600{
601 misc_deregister(&cuse_miscdev);
602 class_destroy(cuse_class);
603}
604
605module_init(cuse_init);
606module_exit(cuse_exit);
607
608MODULE_AUTHOR("Tejun Heo <tj@kernel.org>");
609MODULE_DESCRIPTION("Character device in Userspace");
610MODULE_LICENSE("GPL");
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba76b68c52ff..8fed2ed12f38 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -46,6 +46,7 @@ struct fuse_req *fuse_request_alloc(void)
46 fuse_request_init(req); 46 fuse_request_init(req);
47 return req; 47 return req;
48} 48}
49EXPORT_SYMBOL_GPL(fuse_request_alloc);
49 50
50struct fuse_req *fuse_request_alloc_nofs(void) 51struct fuse_req *fuse_request_alloc_nofs(void)
51{ 52{
@@ -124,6 +125,7 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
124 atomic_dec(&fc->num_waiting); 125 atomic_dec(&fc->num_waiting);
125 return ERR_PTR(err); 126 return ERR_PTR(err);
126} 127}
128EXPORT_SYMBOL_GPL(fuse_get_req);
127 129
128/* 130/*
129 * Return request in fuse_file->reserved_req. However that may 131 * Return request in fuse_file->reserved_req. However that may
@@ -208,6 +210,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
208 fuse_request_free(req); 210 fuse_request_free(req);
209 } 211 }
210} 212}
213EXPORT_SYMBOL_GPL(fuse_put_request);
211 214
212static unsigned len_args(unsigned numargs, struct fuse_arg *args) 215static unsigned len_args(unsigned numargs, struct fuse_arg *args)
213{ 216{
@@ -282,7 +285,7 @@ __releases(&fc->lock)
282 wake_up_all(&fc->blocked_waitq); 285 wake_up_all(&fc->blocked_waitq);
283 } 286 }
284 if (fc->num_background == FUSE_CONGESTION_THRESHOLD && 287 if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
285 fc->connected) { 288 fc->connected && fc->bdi_initialized) {
286 clear_bdi_congested(&fc->bdi, READ); 289 clear_bdi_congested(&fc->bdi, READ);
287 clear_bdi_congested(&fc->bdi, WRITE); 290 clear_bdi_congested(&fc->bdi, WRITE);
288 } 291 }
@@ -400,6 +403,7 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
400 } 403 }
401 spin_unlock(&fc->lock); 404 spin_unlock(&fc->lock);
402} 405}
406EXPORT_SYMBOL_GPL(fuse_request_send);
403 407
404static void fuse_request_send_nowait_locked(struct fuse_conn *fc, 408static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
405 struct fuse_req *req) 409 struct fuse_req *req)
@@ -408,7 +412,8 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
408 fc->num_background++; 412 fc->num_background++;
409 if (fc->num_background == FUSE_MAX_BACKGROUND) 413 if (fc->num_background == FUSE_MAX_BACKGROUND)
410 fc->blocked = 1; 414 fc->blocked = 1;
411 if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { 415 if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
416 fc->bdi_initialized) {
412 set_bdi_congested(&fc->bdi, READ); 417 set_bdi_congested(&fc->bdi, READ);
413 set_bdi_congested(&fc->bdi, WRITE); 418 set_bdi_congested(&fc->bdi, WRITE);
414 } 419 }
@@ -439,6 +444,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
439 req->isreply = 1; 444 req->isreply = 1;
440 fuse_request_send_nowait(fc, req); 445 fuse_request_send_nowait(fc, req);
441} 446}
447EXPORT_SYMBOL_GPL(fuse_request_send_background);
442 448
443/* 449/*
444 * Called under fc->lock 450 * Called under fc->lock
@@ -1105,8 +1111,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
1105 } 1111 }
1106 spin_unlock(&fc->lock); 1112 spin_unlock(&fc->lock);
1107} 1113}
1114EXPORT_SYMBOL_GPL(fuse_abort_conn);
1108 1115
1109static int fuse_dev_release(struct inode *inode, struct file *file) 1116int fuse_dev_release(struct inode *inode, struct file *file)
1110{ 1117{
1111 struct fuse_conn *fc = fuse_get_conn(file); 1118 struct fuse_conn *fc = fuse_get_conn(file);
1112 if (fc) { 1119 if (fc) {
@@ -1120,6 +1127,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
1120 1127
1121 return 0; 1128 return 0;
1122} 1129}
1130EXPORT_SYMBOL_GPL(fuse_dev_release);
1123 1131
1124static int fuse_dev_fasync(int fd, struct file *file, int on) 1132static int fuse_dev_fasync(int fd, struct file *file, int on)
1125{ 1133{
@@ -1142,6 +1150,7 @@ const struct file_operations fuse_dev_operations = {
1142 .release = fuse_dev_release, 1150 .release = fuse_dev_release,
1143 .fasync = fuse_dev_fasync, 1151 .fasync = fuse_dev_fasync,
1144}; 1152};
1153EXPORT_SYMBOL_GPL(fuse_dev_operations);
1145 1154
1146static struct miscdevice fuse_miscdevice = { 1155static struct miscdevice fuse_miscdevice = {
1147 .minor = FUSE_MINOR, 1156 .minor = FUSE_MINOR,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8b8eebc5614b..b3089a083d30 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -362,19 +362,6 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
362} 362}
363 363
364/* 364/*
365 * Synchronous release for the case when something goes wrong in CREATE_OPEN
366 */
367static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
368 u64 nodeid, int flags)
369{
370 fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
371 ff->reserved_req->force = 1;
372 fuse_request_send(fc, ff->reserved_req);
373 fuse_put_request(fc, ff->reserved_req);
374 kfree(ff);
375}
376
377/*
378 * Atomic create+open operation 365 * Atomic create+open operation
379 * 366 *
380 * If the filesystem doesn't support this, then fall back to separate 367 * If the filesystem doesn't support this, then fall back to separate
@@ -445,12 +432,14 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
445 goto out_free_ff; 432 goto out_free_ff;
446 433
447 fuse_put_request(fc, req); 434 fuse_put_request(fc, req);
435 ff->fh = outopen.fh;
436 ff->nodeid = outentry.nodeid;
437 ff->open_flags = outopen.open_flags;
448 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, 438 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
449 &outentry.attr, entry_attr_timeout(&outentry), 0); 439 &outentry.attr, entry_attr_timeout(&outentry), 0);
450 if (!inode) { 440 if (!inode) {
451 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 441 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
452 ff->fh = outopen.fh; 442 fuse_sync_release(ff, flags);
453 fuse_sync_release(fc, ff, outentry.nodeid, flags);
454 fuse_send_forget(fc, forget_req, outentry.nodeid, 1); 443 fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
455 return -ENOMEM; 444 return -ENOMEM;
456 } 445 }
@@ -460,11 +449,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
460 fuse_invalidate_attr(dir); 449 fuse_invalidate_attr(dir);
461 file = lookup_instantiate_filp(nd, entry, generic_file_open); 450 file = lookup_instantiate_filp(nd, entry, generic_file_open);
462 if (IS_ERR(file)) { 451 if (IS_ERR(file)) {
463 ff->fh = outopen.fh; 452 fuse_sync_release(ff, flags);
464 fuse_sync_release(fc, ff, outentry.nodeid, flags);
465 return PTR_ERR(file); 453 return PTR_ERR(file);
466 } 454 }
467 fuse_finish_open(inode, file, ff, &outopen); 455 file->private_data = fuse_file_get(ff);
456 fuse_finish_open(inode, file);
468 return 0; 457 return 0;
469 458
470 out_free_ff: 459 out_free_ff:
@@ -1035,7 +1024,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
1035 req->out.argpages = 1; 1024 req->out.argpages = 1;
1036 req->num_pages = 1; 1025 req->num_pages = 1;
1037 req->pages[0] = page; 1026 req->pages[0] = page;
1038 fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR); 1027 fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
1039 fuse_request_send(fc, req); 1028 fuse_request_send(fc, req);
1040 nbytes = req->out.args[0].size; 1029 nbytes = req->out.args[0].size;
1041 err = req->out.h.error; 1030 err = req->out.h.error;
@@ -1101,12 +1090,14 @@ static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
1101 1090
1102static int fuse_dir_open(struct inode *inode, struct file *file) 1091static int fuse_dir_open(struct inode *inode, struct file *file)
1103{ 1092{
1104 return fuse_open_common(inode, file, 1); 1093 return fuse_open_common(inode, file, true);
1105} 1094}
1106 1095
1107static int fuse_dir_release(struct inode *inode, struct file *file) 1096static int fuse_dir_release(struct inode *inode, struct file *file)
1108{ 1097{
1109 return fuse_release_common(inode, file, 1); 1098 fuse_release_common(file, FUSE_RELEASEDIR);
1099
1100 return 0;
1110} 1101}
1111 1102
1112static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync) 1103static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 06f30e965676..fce6ce694fde 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -12,13 +12,13 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/module.h>
15 16
16static const struct file_operations fuse_direct_io_file_operations; 17static const struct file_operations fuse_direct_io_file_operations;
17 18
18static int fuse_send_open(struct inode *inode, struct file *file, int isdir, 19static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
19 struct fuse_open_out *outargp) 20 int opcode, struct fuse_open_out *outargp)
20{ 21{
21 struct fuse_conn *fc = get_fuse_conn(inode);
22 struct fuse_open_in inarg; 22 struct fuse_open_in inarg;
23 struct fuse_req *req; 23 struct fuse_req *req;
24 int err; 24 int err;
@@ -31,8 +31,8 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
31 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); 31 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
32 if (!fc->atomic_o_trunc) 32 if (!fc->atomic_o_trunc)
33 inarg.flags &= ~O_TRUNC; 33 inarg.flags &= ~O_TRUNC;
34 req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 34 req->in.h.opcode = opcode;
35 req->in.h.nodeid = get_node_id(inode); 35 req->in.h.nodeid = nodeid;
36 req->in.numargs = 1; 36 req->in.numargs = 1;
37 req->in.args[0].size = sizeof(inarg); 37 req->in.args[0].size = sizeof(inarg);
38 req->in.args[0].value = &inarg; 38 req->in.args[0].value = &inarg;
@@ -49,22 +49,27 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
49struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) 49struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
50{ 50{
51 struct fuse_file *ff; 51 struct fuse_file *ff;
52
52 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 53 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
53 if (ff) { 54 if (unlikely(!ff))
54 ff->reserved_req = fuse_request_alloc(); 55 return NULL;
55 if (!ff->reserved_req) { 56
56 kfree(ff); 57 ff->fc = fc;
57 return NULL; 58 ff->reserved_req = fuse_request_alloc();
58 } else { 59 if (unlikely(!ff->reserved_req)) {
59 INIT_LIST_HEAD(&ff->write_entry); 60 kfree(ff);
60 atomic_set(&ff->count, 0); 61 return NULL;
61 spin_lock(&fc->lock);
62 ff->kh = ++fc->khctr;
63 spin_unlock(&fc->lock);
64 }
65 RB_CLEAR_NODE(&ff->polled_node);
66 init_waitqueue_head(&ff->poll_wait);
67 } 62 }
63
64 INIT_LIST_HEAD(&ff->write_entry);
65 atomic_set(&ff->count, 0);
66 RB_CLEAR_NODE(&ff->polled_node);
67 init_waitqueue_head(&ff->poll_wait);
68
69 spin_lock(&fc->lock);
70 ff->kh = ++fc->khctr;
71 spin_unlock(&fc->lock);
72
68 return ff; 73 return ff;
69} 74}
70 75
@@ -74,7 +79,7 @@ void fuse_file_free(struct fuse_file *ff)
74 kfree(ff); 79 kfree(ff);
75} 80}
76 81
77static struct fuse_file *fuse_file_get(struct fuse_file *ff) 82struct fuse_file *fuse_file_get(struct fuse_file *ff)
78{ 83{
79 atomic_inc(&ff->count); 84 atomic_inc(&ff->count);
80 return ff; 85 return ff;
@@ -82,40 +87,65 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
82 87
83static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 88static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
84{ 89{
85 dput(req->misc.release.dentry); 90 path_put(&req->misc.release.path);
86 mntput(req->misc.release.vfsmount);
87} 91}
88 92
89static void fuse_file_put(struct fuse_file *ff) 93static void fuse_file_put(struct fuse_file *ff)
90{ 94{
91 if (atomic_dec_and_test(&ff->count)) { 95 if (atomic_dec_and_test(&ff->count)) {
92 struct fuse_req *req = ff->reserved_req; 96 struct fuse_req *req = ff->reserved_req;
93 struct inode *inode = req->misc.release.dentry->d_inode; 97
94 struct fuse_conn *fc = get_fuse_conn(inode);
95 req->end = fuse_release_end; 98 req->end = fuse_release_end;
96 fuse_request_send_background(fc, req); 99 fuse_request_send_background(ff->fc, req);
97 kfree(ff); 100 kfree(ff);
98 } 101 }
99} 102}
100 103
101void fuse_finish_open(struct inode *inode, struct file *file, 104int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
102 struct fuse_file *ff, struct fuse_open_out *outarg) 105 bool isdir)
103{ 106{
104 if (outarg->open_flags & FOPEN_DIRECT_IO) 107 struct fuse_open_out outarg;
108 struct fuse_file *ff;
109 int err;
110 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
111
112 ff = fuse_file_alloc(fc);
113 if (!ff)
114 return -ENOMEM;
115
116 err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
117 if (err) {
118 fuse_file_free(ff);
119 return err;
120 }
121
122 if (isdir)
123 outarg.open_flags &= ~FOPEN_DIRECT_IO;
124
125 ff->fh = outarg.fh;
126 ff->nodeid = nodeid;
127 ff->open_flags = outarg.open_flags;
128 file->private_data = fuse_file_get(ff);
129
130 return 0;
131}
132EXPORT_SYMBOL_GPL(fuse_do_open);
133
134void fuse_finish_open(struct inode *inode, struct file *file)
135{
136 struct fuse_file *ff = file->private_data;
137
138 if (ff->open_flags & FOPEN_DIRECT_IO)
105 file->f_op = &fuse_direct_io_file_operations; 139 file->f_op = &fuse_direct_io_file_operations;
106 if (!(outarg->open_flags & FOPEN_KEEP_CACHE)) 140 if (!(ff->open_flags & FOPEN_KEEP_CACHE))
107 invalidate_inode_pages2(inode->i_mapping); 141 invalidate_inode_pages2(inode->i_mapping);
108 if (outarg->open_flags & FOPEN_NONSEEKABLE) 142 if (ff->open_flags & FOPEN_NONSEEKABLE)
109 nonseekable_open(inode, file); 143 nonseekable_open(inode, file);
110 ff->fh = outarg->fh;
111 file->private_data = fuse_file_get(ff);
112} 144}
113 145
114int fuse_open_common(struct inode *inode, struct file *file, int isdir) 146int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
115{ 147{
116 struct fuse_conn *fc = get_fuse_conn(inode); 148 struct fuse_conn *fc = get_fuse_conn(inode);
117 struct fuse_open_out outarg;
118 struct fuse_file *ff;
119 int err; 149 int err;
120 150
121 /* VFS checks this, but only _after_ ->open() */ 151 /* VFS checks this, but only _after_ ->open() */
@@ -126,78 +156,85 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
126 if (err) 156 if (err)
127 return err; 157 return err;
128 158
129 ff = fuse_file_alloc(fc); 159 err = fuse_do_open(fc, get_node_id(inode), file, isdir);
130 if (!ff)
131 return -ENOMEM;
132
133 err = fuse_send_open(inode, file, isdir, &outarg);
134 if (err) 160 if (err)
135 fuse_file_free(ff); 161 return err;
136 else {
137 if (isdir)
138 outarg.open_flags &= ~FOPEN_DIRECT_IO;
139 fuse_finish_open(inode, file, ff, &outarg);
140 }
141 162
142 return err; 163 fuse_finish_open(inode, file);
164
165 return 0;
143} 166}
144 167
145void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) 168static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
146{ 169{
170 struct fuse_conn *fc = ff->fc;
147 struct fuse_req *req = ff->reserved_req; 171 struct fuse_req *req = ff->reserved_req;
148 struct fuse_release_in *inarg = &req->misc.release.in; 172 struct fuse_release_in *inarg = &req->misc.release.in;
149 173
174 spin_lock(&fc->lock);
175 list_del(&ff->write_entry);
176 if (!RB_EMPTY_NODE(&ff->polled_node))
177 rb_erase(&ff->polled_node, &fc->polled_files);
178 spin_unlock(&fc->lock);
179
180 wake_up_interruptible_sync(&ff->poll_wait);
181
150 inarg->fh = ff->fh; 182 inarg->fh = ff->fh;
151 inarg->flags = flags; 183 inarg->flags = flags;
152 req->in.h.opcode = opcode; 184 req->in.h.opcode = opcode;
153 req->in.h.nodeid = nodeid; 185 req->in.h.nodeid = ff->nodeid;
154 req->in.numargs = 1; 186 req->in.numargs = 1;
155 req->in.args[0].size = sizeof(struct fuse_release_in); 187 req->in.args[0].size = sizeof(struct fuse_release_in);
156 req->in.args[0].value = inarg; 188 req->in.args[0].value = inarg;
157} 189}
158 190
159int fuse_release_common(struct inode *inode, struct file *file, int isdir) 191void fuse_release_common(struct file *file, int opcode)
160{ 192{
161 struct fuse_file *ff = file->private_data; 193 struct fuse_file *ff;
162 if (ff) { 194 struct fuse_req *req;
163 struct fuse_conn *fc = get_fuse_conn(inode);
164 struct fuse_req *req = ff->reserved_req;
165
166 fuse_release_fill(ff, get_node_id(inode), file->f_flags,
167 isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
168 195
169 /* Hold vfsmount and dentry until release is finished */ 196 ff = file->private_data;
170 req->misc.release.vfsmount = mntget(file->f_path.mnt); 197 if (unlikely(!ff))
171 req->misc.release.dentry = dget(file->f_path.dentry); 198 return;
172 199
173 spin_lock(&fc->lock); 200 req = ff->reserved_req;
174 list_del(&ff->write_entry); 201 fuse_prepare_release(ff, file->f_flags, opcode);
175 if (!RB_EMPTY_NODE(&ff->polled_node))
176 rb_erase(&ff->polled_node, &fc->polled_files);
177 spin_unlock(&fc->lock);
178 202
179 wake_up_interruptible_sync(&ff->poll_wait); 203 /* Hold vfsmount and dentry until release is finished */
180 /* 204 path_get(&file->f_path);
181 * Normally this will send the RELEASE request, 205 req->misc.release.path = file->f_path;
182 * however if some asynchronous READ or WRITE requests
183 * are outstanding, the sending will be delayed
184 */
185 fuse_file_put(ff);
186 }
187 206
188 /* Return value is ignored by VFS */ 207 /*
189 return 0; 208 * Normally this will send the RELEASE request, however if
209 * some asynchronous READ or WRITE requests are outstanding,
210 * the sending will be delayed.
211 */
212 fuse_file_put(ff);
190} 213}
191 214
192static int fuse_open(struct inode *inode, struct file *file) 215static int fuse_open(struct inode *inode, struct file *file)
193{ 216{
194 return fuse_open_common(inode, file, 0); 217 return fuse_open_common(inode, file, false);
195} 218}
196 219
197static int fuse_release(struct inode *inode, struct file *file) 220static int fuse_release(struct inode *inode, struct file *file)
198{ 221{
199 return fuse_release_common(inode, file, 0); 222 fuse_release_common(file, FUSE_RELEASE);
223
224 /* return value is ignored by VFS */
225 return 0;
226}
227
228void fuse_sync_release(struct fuse_file *ff, int flags)
229{
230 WARN_ON(atomic_read(&ff->count) > 1);
231 fuse_prepare_release(ff, flags, FUSE_RELEASE);
232 ff->reserved_req->force = 1;
233 fuse_request_send(ff->fc, ff->reserved_req);
234 fuse_put_request(ff->fc, ff->reserved_req);
235 kfree(ff);
200} 236}
237EXPORT_SYMBOL_GPL(fuse_sync_release);
201 238
202/* 239/*
203 * Scramble the ID space with XTEA, so that the value of the files_struct 240 * Scramble the ID space with XTEA, so that the value of the files_struct
@@ -371,8 +408,8 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
371 return fuse_fsync_common(file, de, datasync, 0); 408 return fuse_fsync_common(file, de, datasync, 0);
372} 409}
373 410
374void fuse_read_fill(struct fuse_req *req, struct file *file, 411void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
375 struct inode *inode, loff_t pos, size_t count, int opcode) 412 size_t count, int opcode)
376{ 413{
377 struct fuse_read_in *inarg = &req->misc.read.in; 414 struct fuse_read_in *inarg = &req->misc.read.in;
378 struct fuse_file *ff = file->private_data; 415 struct fuse_file *ff = file->private_data;
@@ -382,7 +419,7 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
382 inarg->size = count; 419 inarg->size = count;
383 inarg->flags = file->f_flags; 420 inarg->flags = file->f_flags;
384 req->in.h.opcode = opcode; 421 req->in.h.opcode = opcode;
385 req->in.h.nodeid = get_node_id(inode); 422 req->in.h.nodeid = ff->nodeid;
386 req->in.numargs = 1; 423 req->in.numargs = 1;
387 req->in.args[0].size = sizeof(struct fuse_read_in); 424 req->in.args[0].size = sizeof(struct fuse_read_in);
388 req->in.args[0].value = inarg; 425 req->in.args[0].value = inarg;
@@ -392,12 +429,12 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
392} 429}
393 430
394static size_t fuse_send_read(struct fuse_req *req, struct file *file, 431static size_t fuse_send_read(struct fuse_req *req, struct file *file,
395 struct inode *inode, loff_t pos, size_t count, 432 loff_t pos, size_t count, fl_owner_t owner)
396 fl_owner_t owner)
397{ 433{
398 struct fuse_conn *fc = get_fuse_conn(inode); 434 struct fuse_file *ff = file->private_data;
435 struct fuse_conn *fc = ff->fc;
399 436
400 fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 437 fuse_read_fill(req, file, pos, count, FUSE_READ);
401 if (owner != NULL) { 438 if (owner != NULL) {
402 struct fuse_read_in *inarg = &req->misc.read.in; 439 struct fuse_read_in *inarg = &req->misc.read.in;
403 440
@@ -455,7 +492,7 @@ static int fuse_readpage(struct file *file, struct page *page)
455 req->out.argpages = 1; 492 req->out.argpages = 1;
456 req->num_pages = 1; 493 req->num_pages = 1;
457 req->pages[0] = page; 494 req->pages[0] = page;
458 num_read = fuse_send_read(req, file, inode, pos, count, NULL); 495 num_read = fuse_send_read(req, file, pos, count, NULL);
459 err = req->out.h.error; 496 err = req->out.h.error;
460 fuse_put_request(fc, req); 497 fuse_put_request(fc, req);
461 498
@@ -504,19 +541,18 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
504 fuse_file_put(req->ff); 541 fuse_file_put(req->ff);
505} 542}
506 543
507static void fuse_send_readpages(struct fuse_req *req, struct file *file, 544static void fuse_send_readpages(struct fuse_req *req, struct file *file)
508 struct inode *inode)
509{ 545{
510 struct fuse_conn *fc = get_fuse_conn(inode); 546 struct fuse_file *ff = file->private_data;
547 struct fuse_conn *fc = ff->fc;
511 loff_t pos = page_offset(req->pages[0]); 548 loff_t pos = page_offset(req->pages[0]);
512 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 549 size_t count = req->num_pages << PAGE_CACHE_SHIFT;
513 550
514 req->out.argpages = 1; 551 req->out.argpages = 1;
515 req->out.page_zeroing = 1; 552 req->out.page_zeroing = 1;
516 fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 553 fuse_read_fill(req, file, pos, count, FUSE_READ);
517 req->misc.read.attr_ver = fuse_get_attr_version(fc); 554 req->misc.read.attr_ver = fuse_get_attr_version(fc);
518 if (fc->async_read) { 555 if (fc->async_read) {
519 struct fuse_file *ff = file->private_data;
520 req->ff = fuse_file_get(ff); 556 req->ff = fuse_file_get(ff);
521 req->end = fuse_readpages_end; 557 req->end = fuse_readpages_end;
522 fuse_request_send_background(fc, req); 558 fuse_request_send_background(fc, req);
@@ -546,7 +582,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
546 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 582 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
547 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 583 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
548 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 584 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
549 fuse_send_readpages(req, data->file, inode); 585 fuse_send_readpages(req, data->file);
550 data->req = req = fuse_get_req(fc); 586 data->req = req = fuse_get_req(fc);
551 if (IS_ERR(req)) { 587 if (IS_ERR(req)) {
552 unlock_page(page); 588 unlock_page(page);
@@ -580,7 +616,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
580 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 616 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
581 if (!err) { 617 if (!err) {
582 if (data.req->num_pages) 618 if (data.req->num_pages)
583 fuse_send_readpages(data.req, file, inode); 619 fuse_send_readpages(data.req, file);
584 else 620 else
585 fuse_put_request(fc, data.req); 621 fuse_put_request(fc, data.req);
586 } 622 }
@@ -607,24 +643,19 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
607 return generic_file_aio_read(iocb, iov, nr_segs, pos); 643 return generic_file_aio_read(iocb, iov, nr_segs, pos);
608} 644}
609 645
610static void fuse_write_fill(struct fuse_req *req, struct file *file, 646static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
611 struct fuse_file *ff, struct inode *inode, 647 loff_t pos, size_t count)
612 loff_t pos, size_t count, int writepage)
613{ 648{
614 struct fuse_conn *fc = get_fuse_conn(inode);
615 struct fuse_write_in *inarg = &req->misc.write.in; 649 struct fuse_write_in *inarg = &req->misc.write.in;
616 struct fuse_write_out *outarg = &req->misc.write.out; 650 struct fuse_write_out *outarg = &req->misc.write.out;
617 651
618 memset(inarg, 0, sizeof(struct fuse_write_in));
619 inarg->fh = ff->fh; 652 inarg->fh = ff->fh;
620 inarg->offset = pos; 653 inarg->offset = pos;
621 inarg->size = count; 654 inarg->size = count;
622 inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
623 inarg->flags = file ? file->f_flags : 0;
624 req->in.h.opcode = FUSE_WRITE; 655 req->in.h.opcode = FUSE_WRITE;
625 req->in.h.nodeid = get_node_id(inode); 656 req->in.h.nodeid = ff->nodeid;
626 req->in.numargs = 2; 657 req->in.numargs = 2;
627 if (fc->minor < 9) 658 if (ff->fc->minor < 9)
628 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; 659 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
629 else 660 else
630 req->in.args[0].size = sizeof(struct fuse_write_in); 661 req->in.args[0].size = sizeof(struct fuse_write_in);
@@ -636,13 +667,15 @@ static void fuse_write_fill(struct fuse_req *req, struct file *file,
636} 667}
637 668
638static size_t fuse_send_write(struct fuse_req *req, struct file *file, 669static size_t fuse_send_write(struct fuse_req *req, struct file *file,
639 struct inode *inode, loff_t pos, size_t count, 670 loff_t pos, size_t count, fl_owner_t owner)
640 fl_owner_t owner)
641{ 671{
642 struct fuse_conn *fc = get_fuse_conn(inode); 672 struct fuse_file *ff = file->private_data;
643 fuse_write_fill(req, file, file->private_data, inode, pos, count, 0); 673 struct fuse_conn *fc = ff->fc;
674 struct fuse_write_in *inarg = &req->misc.write.in;
675
676 fuse_write_fill(req, ff, pos, count);
677 inarg->flags = file->f_flags;
644 if (owner != NULL) { 678 if (owner != NULL) {
645 struct fuse_write_in *inarg = &req->misc.write.in;
646 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 679 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
647 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 680 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
648 } 681 }
@@ -700,7 +733,7 @@ static int fuse_buffered_write(struct file *file, struct inode *inode,
700 req->num_pages = 1; 733 req->num_pages = 1;
701 req->pages[0] = page; 734 req->pages[0] = page;
702 req->page_offset = offset; 735 req->page_offset = offset;
703 nres = fuse_send_write(req, file, inode, pos, count, NULL); 736 nres = fuse_send_write(req, file, pos, count, NULL);
704 err = req->out.h.error; 737 err = req->out.h.error;
705 fuse_put_request(fc, req); 738 fuse_put_request(fc, req);
706 if (!err && !nres) 739 if (!err && !nres)
@@ -741,7 +774,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
741 for (i = 0; i < req->num_pages; i++) 774 for (i = 0; i < req->num_pages; i++)
742 fuse_wait_on_page_writeback(inode, req->pages[i]->index); 775 fuse_wait_on_page_writeback(inode, req->pages[i]->index);
743 776
744 res = fuse_send_write(req, file, inode, pos, count, NULL); 777 res = fuse_send_write(req, file, pos, count, NULL);
745 778
746 offset = req->page_offset; 779 offset = req->page_offset;
747 count = res; 780 count = res;
@@ -979,25 +1012,23 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
979 return 0; 1012 return 0;
980} 1013}
981 1014
982static ssize_t fuse_direct_io(struct file *file, const char __user *buf, 1015ssize_t fuse_direct_io(struct file *file, const char __user *buf,
983 size_t count, loff_t *ppos, int write) 1016 size_t count, loff_t *ppos, int write)
984{ 1017{
985 struct inode *inode = file->f_path.dentry->d_inode; 1018 struct fuse_file *ff = file->private_data;
986 struct fuse_conn *fc = get_fuse_conn(inode); 1019 struct fuse_conn *fc = ff->fc;
987 size_t nmax = write ? fc->max_write : fc->max_read; 1020 size_t nmax = write ? fc->max_write : fc->max_read;
988 loff_t pos = *ppos; 1021 loff_t pos = *ppos;
989 ssize_t res = 0; 1022 ssize_t res = 0;
990 struct fuse_req *req; 1023 struct fuse_req *req;
991 1024
992 if (is_bad_inode(inode))
993 return -EIO;
994
995 req = fuse_get_req(fc); 1025 req = fuse_get_req(fc);
996 if (IS_ERR(req)) 1026 if (IS_ERR(req))
997 return PTR_ERR(req); 1027 return PTR_ERR(req);
998 1028
999 while (count) { 1029 while (count) {
1000 size_t nres; 1030 size_t nres;
1031 fl_owner_t owner = current->files;
1001 size_t nbytes = min(count, nmax); 1032 size_t nbytes = min(count, nmax);
1002 int err = fuse_get_user_pages(req, buf, &nbytes, write); 1033 int err = fuse_get_user_pages(req, buf, &nbytes, write);
1003 if (err) { 1034 if (err) {
@@ -1006,11 +1037,10 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
1006 } 1037 }
1007 1038
1008 if (write) 1039 if (write)
1009 nres = fuse_send_write(req, file, inode, pos, nbytes, 1040 nres = fuse_send_write(req, file, pos, nbytes, owner);
1010 current->files);
1011 else 1041 else
1012 nres = fuse_send_read(req, file, inode, pos, nbytes, 1042 nres = fuse_send_read(req, file, pos, nbytes, owner);
1013 current->files); 1043
1014 fuse_release_user_pages(req, !write); 1044 fuse_release_user_pages(req, !write);
1015 if (req->out.h.error) { 1045 if (req->out.h.error) {
1016 if (!res) 1046 if (!res)
@@ -1034,20 +1064,27 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
1034 } 1064 }
1035 } 1065 }
1036 fuse_put_request(fc, req); 1066 fuse_put_request(fc, req);
1037 if (res > 0) { 1067 if (res > 0)
1038 if (write)
1039 fuse_write_update_size(inode, pos);
1040 *ppos = pos; 1068 *ppos = pos;
1041 }
1042 fuse_invalidate_attr(inode);
1043 1069
1044 return res; 1070 return res;
1045} 1071}
1072EXPORT_SYMBOL_GPL(fuse_direct_io);
1046 1073
1047static ssize_t fuse_direct_read(struct file *file, char __user *buf, 1074static ssize_t fuse_direct_read(struct file *file, char __user *buf,
1048 size_t count, loff_t *ppos) 1075 size_t count, loff_t *ppos)
1049{ 1076{
1050 return fuse_direct_io(file, buf, count, ppos, 0); 1077 ssize_t res;
1078 struct inode *inode = file->f_path.dentry->d_inode;
1079
1080 if (is_bad_inode(inode))
1081 return -EIO;
1082
1083 res = fuse_direct_io(file, buf, count, ppos, 0);
1084
1085 fuse_invalidate_attr(inode);
1086
1087 return res;
1051} 1088}
1052 1089
1053static ssize_t fuse_direct_write(struct file *file, const char __user *buf, 1090static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
@@ -1055,12 +1092,22 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1055{ 1092{
1056 struct inode *inode = file->f_path.dentry->d_inode; 1093 struct inode *inode = file->f_path.dentry->d_inode;
1057 ssize_t res; 1094 ssize_t res;
1095
1096 if (is_bad_inode(inode))
1097 return -EIO;
1098
1058 /* Don't allow parallel writes to the same file */ 1099 /* Don't allow parallel writes to the same file */
1059 mutex_lock(&inode->i_mutex); 1100 mutex_lock(&inode->i_mutex);
1060 res = generic_write_checks(file, ppos, &count, 0); 1101 res = generic_write_checks(file, ppos, &count, 0);
1061 if (!res) 1102 if (!res) {
1062 res = fuse_direct_io(file, buf, count, ppos, 1); 1103 res = fuse_direct_io(file, buf, count, ppos, 1);
1104 if (res > 0)
1105 fuse_write_update_size(inode, *ppos);
1106 }
1063 mutex_unlock(&inode->i_mutex); 1107 mutex_unlock(&inode->i_mutex);
1108
1109 fuse_invalidate_attr(inode);
1110
1064 return res; 1111 return res;
1065} 1112}
1066 1113
@@ -1177,9 +1224,10 @@ static int fuse_writepage_locked(struct page *page)
1177 req->ff = fuse_file_get(ff); 1224 req->ff = fuse_file_get(ff);
1178 spin_unlock(&fc->lock); 1225 spin_unlock(&fc->lock);
1179 1226
1180 fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1); 1227 fuse_write_fill(req, ff, page_offset(page), 0);
1181 1228
1182 copy_highpage(tmp_page, page); 1229 copy_highpage(tmp_page, page);
1230 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1183 req->in.argpages = 1; 1231 req->in.argpages = 1;
1184 req->num_pages = 1; 1232 req->num_pages = 1;
1185 req->pages[0] = tmp_page; 1233 req->pages[0] = tmp_page;
@@ -1603,12 +1651,11 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1603 * limits ioctl data transfers to well-formed ioctls and is the forced 1651 * limits ioctl data transfers to well-formed ioctls and is the forced
1604 * behavior for all FUSE servers. 1652 * behavior for all FUSE servers.
1605 */ 1653 */
1606static long fuse_file_do_ioctl(struct file *file, unsigned int cmd, 1654long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
1607 unsigned long arg, unsigned int flags) 1655 unsigned int flags)
1608{ 1656{
1609 struct inode *inode = file->f_dentry->d_inode;
1610 struct fuse_file *ff = file->private_data; 1657 struct fuse_file *ff = file->private_data;
1611 struct fuse_conn *fc = get_fuse_conn(inode); 1658 struct fuse_conn *fc = ff->fc;
1612 struct fuse_ioctl_in inarg = { 1659 struct fuse_ioctl_in inarg = {
1613 .fh = ff->fh, 1660 .fh = ff->fh,
1614 .cmd = cmd, 1661 .cmd = cmd,
@@ -1627,13 +1674,6 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
1627 /* assume all the iovs returned by client always fits in a page */ 1674 /* assume all the iovs returned by client always fits in a page */
1628 BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); 1675 BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
1629 1676
1630 if (!fuse_allow_task(fc, current))
1631 return -EACCES;
1632
1633 err = -EIO;
1634 if (is_bad_inode(inode))
1635 goto out;
1636
1637 err = -ENOMEM; 1677 err = -ENOMEM;
1638 pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); 1678 pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
1639 iov_page = alloc_page(GFP_KERNEL); 1679 iov_page = alloc_page(GFP_KERNEL);
@@ -1694,7 +1734,7 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
1694 1734
1695 /* okay, let's send it to the client */ 1735 /* okay, let's send it to the client */
1696 req->in.h.opcode = FUSE_IOCTL; 1736 req->in.h.opcode = FUSE_IOCTL;
1697 req->in.h.nodeid = get_node_id(inode); 1737 req->in.h.nodeid = ff->nodeid;
1698 req->in.numargs = 1; 1738 req->in.numargs = 1;
1699 req->in.args[0].size = sizeof(inarg); 1739 req->in.args[0].size = sizeof(inarg);
1700 req->in.args[0].value = &inarg; 1740 req->in.args[0].value = &inarg;
@@ -1777,17 +1817,33 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
1777 1817
1778 return err ? err : outarg.result; 1818 return err ? err : outarg.result;
1779} 1819}
1820EXPORT_SYMBOL_GPL(fuse_do_ioctl);
1821
1822static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
1823 unsigned long arg, unsigned int flags)
1824{
1825 struct inode *inode = file->f_dentry->d_inode;
1826 struct fuse_conn *fc = get_fuse_conn(inode);
1827
1828 if (!fuse_allow_task(fc, current))
1829 return -EACCES;
1830
1831 if (is_bad_inode(inode))
1832 return -EIO;
1833
1834 return fuse_do_ioctl(file, cmd, arg, flags);
1835}
1780 1836
1781static long fuse_file_ioctl(struct file *file, unsigned int cmd, 1837static long fuse_file_ioctl(struct file *file, unsigned int cmd,
1782 unsigned long arg) 1838 unsigned long arg)
1783{ 1839{
1784 return fuse_file_do_ioctl(file, cmd, arg, 0); 1840 return fuse_file_ioctl_common(file, cmd, arg, 0);
1785} 1841}
1786 1842
1787static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, 1843static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
1788 unsigned long arg) 1844 unsigned long arg)
1789{ 1845{
1790 return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT); 1846 return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
1791} 1847}
1792 1848
1793/* 1849/*
@@ -1841,11 +1897,10 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
1841 spin_unlock(&fc->lock); 1897 spin_unlock(&fc->lock);
1842} 1898}
1843 1899
1844static unsigned fuse_file_poll(struct file *file, poll_table *wait) 1900unsigned fuse_file_poll(struct file *file, poll_table *wait)
1845{ 1901{
1846 struct inode *inode = file->f_dentry->d_inode;
1847 struct fuse_file *ff = file->private_data; 1902 struct fuse_file *ff = file->private_data;
1848 struct fuse_conn *fc = get_fuse_conn(inode); 1903 struct fuse_conn *fc = ff->fc;
1849 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 1904 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
1850 struct fuse_poll_out outarg; 1905 struct fuse_poll_out outarg;
1851 struct fuse_req *req; 1906 struct fuse_req *req;
@@ -1870,7 +1925,7 @@ static unsigned fuse_file_poll(struct file *file, poll_table *wait)
1870 return PTR_ERR(req); 1925 return PTR_ERR(req);
1871 1926
1872 req->in.h.opcode = FUSE_POLL; 1927 req->in.h.opcode = FUSE_POLL;
1873 req->in.h.nodeid = get_node_id(inode); 1928 req->in.h.nodeid = ff->nodeid;
1874 req->in.numargs = 1; 1929 req->in.numargs = 1;
1875 req->in.args[0].size = sizeof(inarg); 1930 req->in.args[0].size = sizeof(inarg);
1876 req->in.args[0].value = &inarg; 1931 req->in.args[0].value = &inarg;
@@ -1889,6 +1944,7 @@ static unsigned fuse_file_poll(struct file *file, poll_table *wait)
1889 } 1944 }
1890 return POLLERR; 1945 return POLLERR;
1891} 1946}
1947EXPORT_SYMBOL_GPL(fuse_file_poll);
1892 1948
1893/* 1949/*
1894 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and 1950 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6fc5aedaa0d5..aaf2f9ff970e 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -97,8 +97,13 @@ struct fuse_inode {
97 struct list_head writepages; 97 struct list_head writepages;
98}; 98};
99 99
100struct fuse_conn;
101
100/** FUSE specific file data */ 102/** FUSE specific file data */
101struct fuse_file { 103struct fuse_file {
104 /** Fuse connection for this file */
105 struct fuse_conn *fc;
106
102 /** Request reserved for flush and release */ 107 /** Request reserved for flush and release */
103 struct fuse_req *reserved_req; 108 struct fuse_req *reserved_req;
104 109
@@ -108,9 +113,15 @@ struct fuse_file {
108 /** File handle used by userspace */ 113 /** File handle used by userspace */
109 u64 fh; 114 u64 fh;
110 115
116 /** Node id of this file */
117 u64 nodeid;
118
111 /** Refcount */ 119 /** Refcount */
112 atomic_t count; 120 atomic_t count;
113 121
122 /** FOPEN_* flags returned by open */
123 u32 open_flags;
124
114 /** Entry on inode's write_files list */ 125 /** Entry on inode's write_files list */
115 struct list_head write_entry; 126 struct list_head write_entry;
116 127
@@ -185,8 +196,6 @@ enum fuse_req_state {
185 FUSE_REQ_FINISHED 196 FUSE_REQ_FINISHED
186}; 197};
187 198
188struct fuse_conn;
189
190/** 199/**
191 * A request to the client 200 * A request to the client
192 */ 201 */
@@ -248,11 +257,12 @@ struct fuse_req {
248 struct fuse_forget_in forget_in; 257 struct fuse_forget_in forget_in;
249 struct { 258 struct {
250 struct fuse_release_in in; 259 struct fuse_release_in in;
251 struct vfsmount *vfsmount; 260 struct path path;
252 struct dentry *dentry;
253 } release; 261 } release;
254 struct fuse_init_in init_in; 262 struct fuse_init_in init_in;
255 struct fuse_init_out init_out; 263 struct fuse_init_out init_out;
264 struct cuse_init_in cuse_init_in;
265 struct cuse_init_out cuse_init_out;
256 struct { 266 struct {
257 struct fuse_read_in in; 267 struct fuse_read_in in;
258 u64 attr_ver; 268 u64 attr_ver;
@@ -386,6 +396,9 @@ struct fuse_conn {
386 /** Filesystem supports NFS exporting. Only set in INIT */ 396 /** Filesystem supports NFS exporting. Only set in INIT */
387 unsigned export_support:1; 397 unsigned export_support:1;
388 398
399 /** Set if bdi is valid */
400 unsigned bdi_initialized:1;
401
389 /* 402 /*
390 * The following bitfields are only for optimization purposes 403 * The following bitfields are only for optimization purposes
391 * and hence races in setting them will not cause malfunction 404 * and hence races in setting them will not cause malfunction
@@ -515,25 +528,24 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
515 * Initialize READ or READDIR request 528 * Initialize READ or READDIR request
516 */ 529 */
517void fuse_read_fill(struct fuse_req *req, struct file *file, 530void fuse_read_fill(struct fuse_req *req, struct file *file,
518 struct inode *inode, loff_t pos, size_t count, int opcode); 531 loff_t pos, size_t count, int opcode);
519 532
520/** 533/**
521 * Send OPEN or OPENDIR request 534 * Send OPEN or OPENDIR request
522 */ 535 */
523int fuse_open_common(struct inode *inode, struct file *file, int isdir); 536int fuse_open_common(struct inode *inode, struct file *file, bool isdir);
524 537
525struct fuse_file *fuse_file_alloc(struct fuse_conn *fc); 538struct fuse_file *fuse_file_alloc(struct fuse_conn *fc);
539struct fuse_file *fuse_file_get(struct fuse_file *ff);
526void fuse_file_free(struct fuse_file *ff); 540void fuse_file_free(struct fuse_file *ff);
527void fuse_finish_open(struct inode *inode, struct file *file, 541void fuse_finish_open(struct inode *inode, struct file *file);
528 struct fuse_file *ff, struct fuse_open_out *outarg);
529 542
530/** Fill in ff->reserved_req with a RELEASE request */ 543void fuse_sync_release(struct fuse_file *ff, int flags);
531void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode);
532 544
533/** 545/**
534 * Send RELEASE or RELEASEDIR request 546 * Send RELEASE or RELEASEDIR request
535 */ 547 */
536int fuse_release_common(struct inode *inode, struct file *file, int isdir); 548void fuse_release_common(struct file *file, int opcode);
537 549
538/** 550/**
539 * Send FSYNC or FSYNCDIR request 551 * Send FSYNC or FSYNCDIR request
@@ -652,10 +664,12 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
652 */ 664 */
653struct fuse_conn *fuse_conn_get(struct fuse_conn *fc); 665struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
654 666
667void fuse_conn_kill(struct fuse_conn *fc);
668
655/** 669/**
656 * Initialize fuse_conn 670 * Initialize fuse_conn
657 */ 671 */
658int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb); 672void fuse_conn_init(struct fuse_conn *fc);
659 673
660/** 674/**
661 * Release reference to fuse_conn 675 * Release reference to fuse_conn
@@ -694,4 +708,13 @@ void fuse_release_nowrite(struct inode *inode);
694 708
695u64 fuse_get_attr_version(struct fuse_conn *fc); 709u64 fuse_get_attr_version(struct fuse_conn *fc);
696 710
711int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
712 bool isdir);
713ssize_t fuse_direct_io(struct file *file, const char __user *buf,
714 size_t count, loff_t *ppos, int write);
715long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
716 unsigned int flags);
717unsigned fuse_file_poll(struct file *file, poll_table *wait);
718int fuse_dev_release(struct inode *inode, struct file *file);
719
697#endif /* _FS_FUSE_I_H */ 720#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 91f7c85f1ffd..f0df55a52929 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -277,11 +277,14 @@ static void fuse_send_destroy(struct fuse_conn *fc)
277 } 277 }
278} 278}
279 279
280static void fuse_put_super(struct super_block *sb) 280static void fuse_bdi_destroy(struct fuse_conn *fc)
281{ 281{
282 struct fuse_conn *fc = get_fuse_conn_super(sb); 282 if (fc->bdi_initialized)
283 bdi_destroy(&fc->bdi);
284}
283 285
284 fuse_send_destroy(fc); 286void fuse_conn_kill(struct fuse_conn *fc)
287{
285 spin_lock(&fc->lock); 288 spin_lock(&fc->lock);
286 fc->connected = 0; 289 fc->connected = 0;
287 fc->blocked = 0; 290 fc->blocked = 0;
@@ -295,7 +298,16 @@ static void fuse_put_super(struct super_block *sb)
295 list_del(&fc->entry); 298 list_del(&fc->entry);
296 fuse_ctl_remove_conn(fc); 299 fuse_ctl_remove_conn(fc);
297 mutex_unlock(&fuse_mutex); 300 mutex_unlock(&fuse_mutex);
298 bdi_destroy(&fc->bdi); 301 fuse_bdi_destroy(fc);
302}
303EXPORT_SYMBOL_GPL(fuse_conn_kill);
304
305static void fuse_put_super(struct super_block *sb)
306{
307 struct fuse_conn *fc = get_fuse_conn_super(sb);
308
309 fuse_send_destroy(fc);
310 fuse_conn_kill(fc);
299 fuse_conn_put(fc); 311 fuse_conn_put(fc);
300} 312}
301 313
@@ -466,10 +478,8 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
466 return 0; 478 return 0;
467} 479}
468 480
469int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb) 481void fuse_conn_init(struct fuse_conn *fc)
470{ 482{
471 int err;
472
473 memset(fc, 0, sizeof(*fc)); 483 memset(fc, 0, sizeof(*fc));
474 spin_lock_init(&fc->lock); 484 spin_lock_init(&fc->lock);
475 mutex_init(&fc->inst_mutex); 485 mutex_init(&fc->inst_mutex);
@@ -484,49 +494,12 @@ int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb)
484 INIT_LIST_HEAD(&fc->bg_queue); 494 INIT_LIST_HEAD(&fc->bg_queue);
485 INIT_LIST_HEAD(&fc->entry); 495 INIT_LIST_HEAD(&fc->entry);
486 atomic_set(&fc->num_waiting, 0); 496 atomic_set(&fc->num_waiting, 0);
487 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
488 fc->bdi.unplug_io_fn = default_unplug_io_fn;
489 /* fuse does it's own writeback accounting */
490 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
491 fc->khctr = 0; 497 fc->khctr = 0;
492 fc->polled_files = RB_ROOT; 498 fc->polled_files = RB_ROOT;
493 fc->dev = sb->s_dev;
494 err = bdi_init(&fc->bdi);
495 if (err)
496 goto error_mutex_destroy;
497 if (sb->s_bdev) {
498 err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
499 MAJOR(fc->dev), MINOR(fc->dev));
500 } else {
501 err = bdi_register_dev(&fc->bdi, fc->dev);
502 }
503 if (err)
504 goto error_bdi_destroy;
505 /*
506 * For a single fuse filesystem use max 1% of dirty +
507 * writeback threshold.
508 *
509 * This gives about 1M of write buffer for memory maps on a
510 * machine with 1G and 10% dirty_ratio, which should be more
511 * than enough.
512 *
513 * Privileged users can raise it by writing to
514 *
515 * /sys/class/bdi/<bdi>/max_ratio
516 */
517 bdi_set_max_ratio(&fc->bdi, 1);
518 fc->reqctr = 0; 499 fc->reqctr = 0;
519 fc->blocked = 1; 500 fc->blocked = 1;
520 fc->attr_version = 1; 501 fc->attr_version = 1;
521 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 502 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
522
523 return 0;
524
525 error_bdi_destroy:
526 bdi_destroy(&fc->bdi);
527 error_mutex_destroy:
528 mutex_destroy(&fc->inst_mutex);
529 return err;
530} 503}
531EXPORT_SYMBOL_GPL(fuse_conn_init); 504EXPORT_SYMBOL_GPL(fuse_conn_init);
532 505
@@ -539,12 +512,14 @@ void fuse_conn_put(struct fuse_conn *fc)
539 fc->release(fc); 512 fc->release(fc);
540 } 513 }
541} 514}
515EXPORT_SYMBOL_GPL(fuse_conn_put);
542 516
543struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) 517struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
544{ 518{
545 atomic_inc(&fc->count); 519 atomic_inc(&fc->count);
546 return fc; 520 return fc;
547} 521}
522EXPORT_SYMBOL_GPL(fuse_conn_get);
548 523
549static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) 524static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
550{ 525{
@@ -797,6 +772,48 @@ static void fuse_free_conn(struct fuse_conn *fc)
797 kfree(fc); 772 kfree(fc);
798} 773}
799 774
775static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
776{
777 int err;
778
779 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
780 fc->bdi.unplug_io_fn = default_unplug_io_fn;
781 /* fuse does it's own writeback accounting */
782 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
783
784 err = bdi_init(&fc->bdi);
785 if (err)
786 return err;
787
788 fc->bdi_initialized = 1;
789
790 if (sb->s_bdev) {
791 err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
792 MAJOR(fc->dev), MINOR(fc->dev));
793 } else {
794 err = bdi_register_dev(&fc->bdi, fc->dev);
795 }
796
797 if (err)
798 return err;
799
800 /*
801 * For a single fuse filesystem use max 1% of dirty +
802 * writeback threshold.
803 *
804 * This gives about 1M of write buffer for memory maps on a
805 * machine with 1G and 10% dirty_ratio, which should be more
806 * than enough.
807 *
808 * Privileged users can raise it by writing to
809 *
810 * /sys/class/bdi/<bdi>/max_ratio
811 */
812 bdi_set_max_ratio(&fc->bdi, 1);
813
814 return 0;
815}
816
800static int fuse_fill_super(struct super_block *sb, void *data, int silent) 817static int fuse_fill_super(struct super_block *sb, void *data, int silent)
801{ 818{
802 struct fuse_conn *fc; 819 struct fuse_conn *fc;
@@ -843,11 +860,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
843 if (!fc) 860 if (!fc)
844 goto err_fput; 861 goto err_fput;
845 862
846 err = fuse_conn_init(fc, sb); 863 fuse_conn_init(fc);
847 if (err) { 864
848 kfree(fc); 865 fc->dev = sb->s_dev;
849 goto err_fput; 866 err = fuse_bdi_init(fc, sb);
850 } 867 if (err)
868 goto err_put_conn;
851 869
852 fc->release = fuse_free_conn; 870 fc->release = fuse_free_conn;
853 fc->flags = d.flags; 871 fc->flags = d.flags;
@@ -911,7 +929,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
911 err_put_root: 929 err_put_root:
912 dput(root_dentry); 930 dput(root_dentry);
913 err_put_conn: 931 err_put_conn:
914 bdi_destroy(&fc->bdi); 932 fuse_bdi_destroy(fc);
915 fuse_conn_put(fc); 933 fuse_conn_put(fc);
916 err_fput: 934 err_fput:
917 fput(file); 935 fput(file);
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index d53a9bea1c2f..3da2f1f4f738 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,3 +1,4 @@
1EXTRA_CFLAGS := -I$(src)
1obj-$(CONFIG_GFS2_FS) += gfs2.o 2obj-$(CONFIG_GFS2_FS) += gfs2.o
2gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \ 3gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \
3 glops.o inode.o log.o lops.o main.o meta_io.o \ 4 glops.o inode.o log.o lops.o main.o meta_io.o \
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 329763530dc0..6d47379e794b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -25,6 +25,7 @@
25#include "trans.h" 25#include "trans.h"
26#include "dir.h" 26#include "dir.h"
27#include "util.h" 27#include "util.h"
28#include "trace_gfs2.h"
28 29
29/* This doesn't need to be that large as max 64 bit pointers in a 4k 30/* This doesn't need to be that large as max 64 bit pointers in a 4k
30 * block is 512, so __u16 is fine for that. It saves stack space to 31 * block is 512, so __u16 is fine for that. It saves stack space to
@@ -589,6 +590,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
589 clear_buffer_mapped(bh_map); 590 clear_buffer_mapped(bh_map);
590 clear_buffer_new(bh_map); 591 clear_buffer_new(bh_map);
591 clear_buffer_boundary(bh_map); 592 clear_buffer_boundary(bh_map);
593 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
592 if (gfs2_is_dir(ip)) { 594 if (gfs2_is_dir(ip)) {
593 bsize = sdp->sd_jbsize; 595 bsize = sdp->sd_jbsize;
594 arr = sdp->sd_jheightsize; 596 arr = sdp->sd_jheightsize;
@@ -623,6 +625,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
623 ret = 0; 625 ret = 0;
624out: 626out:
625 release_metapath(&mp); 627 release_metapath(&mp);
628 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
626 bmap_unlock(ip, create); 629 bmap_unlock(ip, create);
627 return ret; 630 return ret;
628 631
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2bf62bcc5181..297421c0427a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -39,6 +39,8 @@
39#include "super.h" 39#include "super.h"
40#include "util.h" 40#include "util.h"
41#include "bmap.h" 41#include "bmap.h"
42#define CREATE_TRACE_POINTS
43#include "trace_gfs2.h"
42 44
43struct gfs2_gl_hash_bucket { 45struct gfs2_gl_hash_bucket {
44 struct hlist_head hb_list; 46 struct hlist_head hb_list;
@@ -155,7 +157,7 @@ static void glock_free(struct gfs2_glock *gl)
155 157
156 if (aspace) 158 if (aspace)
157 gfs2_aspace_put(aspace); 159 gfs2_aspace_put(aspace);
158 160 trace_gfs2_glock_put(gl);
159 sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); 161 sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
160} 162}
161 163
@@ -317,14 +319,17 @@ restart:
317 return 2; 319 return 2;
318 gh->gh_error = ret; 320 gh->gh_error = ret;
319 list_del_init(&gh->gh_list); 321 list_del_init(&gh->gh_list);
322 trace_gfs2_glock_queue(gh, 0);
320 gfs2_holder_wake(gh); 323 gfs2_holder_wake(gh);
321 goto restart; 324 goto restart;
322 } 325 }
323 set_bit(HIF_HOLDER, &gh->gh_iflags); 326 set_bit(HIF_HOLDER, &gh->gh_iflags);
327 trace_gfs2_promote(gh, 1);
324 gfs2_holder_wake(gh); 328 gfs2_holder_wake(gh);
325 goto restart; 329 goto restart;
326 } 330 }
327 set_bit(HIF_HOLDER, &gh->gh_iflags); 331 set_bit(HIF_HOLDER, &gh->gh_iflags);
332 trace_gfs2_promote(gh, 0);
328 gfs2_holder_wake(gh); 333 gfs2_holder_wake(gh);
329 continue; 334 continue;
330 } 335 }
@@ -354,6 +359,7 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
354 else 359 else
355 continue; 360 continue;
356 list_del_init(&gh->gh_list); 361 list_del_init(&gh->gh_list);
362 trace_gfs2_glock_queue(gh, 0);
357 gfs2_holder_wake(gh); 363 gfs2_holder_wake(gh);
358 } 364 }
359} 365}
@@ -422,6 +428,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
422 int rv; 428 int rv;
423 429
424 spin_lock(&gl->gl_spin); 430 spin_lock(&gl->gl_spin);
431 trace_gfs2_glock_state_change(gl, state);
425 state_change(gl, state); 432 state_change(gl, state);
426 gh = find_first_waiter(gl); 433 gh = find_first_waiter(gl);
427 434
@@ -851,6 +858,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
851 gl->gl_demote_state != state) { 858 gl->gl_demote_state != state) {
852 gl->gl_demote_state = LM_ST_UNLOCKED; 859 gl->gl_demote_state = LM_ST_UNLOCKED;
853 } 860 }
861 trace_gfs2_demote_rq(gl);
854} 862}
855 863
856/** 864/**
@@ -936,6 +944,7 @@ fail:
936 goto do_cancel; 944 goto do_cancel;
937 return; 945 return;
938 } 946 }
947 trace_gfs2_glock_queue(gh, 1);
939 list_add_tail(&gh->gh_list, insert_pt); 948 list_add_tail(&gh->gh_list, insert_pt);
940do_cancel: 949do_cancel:
941 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); 950 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@ -1032,6 +1041,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1032 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1041 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1033 fast_path = 1; 1042 fast_path = 1;
1034 } 1043 }
1044 trace_gfs2_glock_queue(gh, 0);
1035 spin_unlock(&gl->gl_spin); 1045 spin_unlock(&gl->gl_spin);
1036 if (likely(fast_path)) 1046 if (likely(fast_path))
1037 return; 1047 return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f2e449c595b4..13c6237c5f67 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -28,6 +28,7 @@
28#include "meta_io.h" 28#include "meta_io.h"
29#include "util.h" 29#include "util.h"
30#include "dir.h" 30#include "dir.h"
31#include "trace_gfs2.h"
31 32
32#define PULL 1 33#define PULL 1
33 34
@@ -313,6 +314,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
313 gfs2_log_lock(sdp); 314 gfs2_log_lock(sdp);
314 } 315 }
315 atomic_sub(blks, &sdp->sd_log_blks_free); 316 atomic_sub(blks, &sdp->sd_log_blks_free);
317 trace_gfs2_log_blocks(sdp, -blks);
316 gfs2_log_unlock(sdp); 318 gfs2_log_unlock(sdp);
317 mutex_unlock(&sdp->sd_log_reserve_mutex); 319 mutex_unlock(&sdp->sd_log_reserve_mutex);
318 320
@@ -333,6 +335,7 @@ void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
333 335
334 gfs2_log_lock(sdp); 336 gfs2_log_lock(sdp);
335 atomic_add(blks, &sdp->sd_log_blks_free); 337 atomic_add(blks, &sdp->sd_log_blks_free);
338 trace_gfs2_log_blocks(sdp, blks);
336 gfs2_assert_withdraw(sdp, 339 gfs2_assert_withdraw(sdp,
337 atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); 340 atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
338 gfs2_log_unlock(sdp); 341 gfs2_log_unlock(sdp);
@@ -558,6 +561,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
558 561
559 gfs2_log_lock(sdp); 562 gfs2_log_lock(sdp);
560 atomic_add(dist, &sdp->sd_log_blks_free); 563 atomic_add(dist, &sdp->sd_log_blks_free);
564 trace_gfs2_log_blocks(sdp, dist);
561 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); 565 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
562 gfs2_log_unlock(sdp); 566 gfs2_log_unlock(sdp);
563 567
@@ -715,6 +719,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
715 up_write(&sdp->sd_log_flush_lock); 719 up_write(&sdp->sd_log_flush_lock);
716 return; 720 return;
717 } 721 }
722 trace_gfs2_log_flush(sdp, 1);
718 723
719 ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL); 724 ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
720 INIT_LIST_HEAD(&ai->ai_ail1_list); 725 INIT_LIST_HEAD(&ai->ai_ail1_list);
@@ -746,6 +751,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
746 else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ 751 else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
747 gfs2_log_lock(sdp); 752 gfs2_log_lock(sdp);
748 atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ 753 atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
754 trace_gfs2_log_blocks(sdp, -1);
749 gfs2_log_unlock(sdp); 755 gfs2_log_unlock(sdp);
750 log_write_header(sdp, 0, PULL); 756 log_write_header(sdp, 0, PULL);
751 } 757 }
@@ -763,7 +769,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
763 ai = NULL; 769 ai = NULL;
764 } 770 }
765 gfs2_log_unlock(sdp); 771 gfs2_log_unlock(sdp);
766 772 trace_gfs2_log_flush(sdp, 0);
767 up_write(&sdp->sd_log_flush_lock); 773 up_write(&sdp->sd_log_flush_lock);
768 774
769 kfree(ai); 775 kfree(ai);
@@ -787,6 +793,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
787 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved); 793 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
788 unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved; 794 unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
789 atomic_add(unused, &sdp->sd_log_blks_free); 795 atomic_add(unused, &sdp->sd_log_blks_free);
796 trace_gfs2_log_blocks(sdp, unused);
790 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= 797 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
791 sdp->sd_jdesc->jd_blocks); 798 sdp->sd_jdesc->jd_blocks);
792 sdp->sd_log_blks_reserved = reserved; 799 sdp->sd_log_blks_reserved = reserved;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 00315f50fa46..9969ff062c5b 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -27,6 +27,7 @@
27#include "rgrp.h" 27#include "rgrp.h"
28#include "trans.h" 28#include "trans.h"
29#include "util.h" 29#include "util.h"
30#include "trace_gfs2.h"
30 31
31/** 32/**
32 * gfs2_pin - Pin a buffer in memory 33 * gfs2_pin - Pin a buffer in memory
@@ -53,6 +54,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
53 if (bd->bd_ail) 54 if (bd->bd_ail)
54 list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list); 55 list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
55 get_bh(bh); 56 get_bh(bh);
57 trace_gfs2_pin(bd, 1);
56} 58}
57 59
58/** 60/**
@@ -89,6 +91,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
89 bd->bd_ail = ai; 91 bd->bd_ail = ai;
90 list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list); 92 list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
91 clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); 93 clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
94 trace_gfs2_pin(bd, 0);
92 gfs2_log_unlock(sdp); 95 gfs2_log_unlock(sdp);
93 unlock_buffer(bh); 96 unlock_buffer(bh);
94} 97}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cc34f271b3e7..7bc3c45cd676 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -33,6 +33,7 @@
33#include "log.h" 33#include "log.h"
34#include "quota.h" 34#include "quota.h"
35#include "dir.h" 35#include "dir.h"
36#include "trace_gfs2.h"
36 37
37#define DO 0 38#define DO 0
38#define UNDO 1 39#define UNDO 1
@@ -775,6 +776,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
775 /* Map the extents for this journal's blocks */ 776 /* Map the extents for this journal's blocks */
776 map_journal_extents(sdp); 777 map_journal_extents(sdp);
777 } 778 }
779 trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
778 780
779 if (sdp->sd_lockstruct.ls_first) { 781 if (sdp->sd_lockstruct.ls_first) {
780 unsigned int x; 782 unsigned int x;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index de3239731db8..daa4ae341a29 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -29,6 +29,7 @@
29#include "util.h" 29#include "util.h"
30#include "log.h" 30#include "log.h"
31#include "inode.h" 31#include "inode.h"
32#include "trace_gfs2.h"
32 33
33#define BFITNOENT ((u32)~0) 34#define BFITNOENT ((u32)~0)
34#define NO_BLOCK ((u64)~0) 35#define NO_BLOCK ((u64)~0)
@@ -1519,7 +1520,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
1519 spin_lock(&sdp->sd_rindex_spin); 1520 spin_lock(&sdp->sd_rindex_spin);
1520 rgd->rd_free_clone -= *n; 1521 rgd->rd_free_clone -= *n;
1521 spin_unlock(&sdp->sd_rindex_spin); 1522 spin_unlock(&sdp->sd_rindex_spin);
1522 1523 trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
1523 *bn = block; 1524 *bn = block;
1524 return 0; 1525 return 0;
1525 1526
@@ -1571,7 +1572,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
1571 spin_lock(&sdp->sd_rindex_spin); 1572 spin_lock(&sdp->sd_rindex_spin);
1572 rgd->rd_free_clone--; 1573 rgd->rd_free_clone--;
1573 spin_unlock(&sdp->sd_rindex_spin); 1574 spin_unlock(&sdp->sd_rindex_spin);
1574 1575 trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
1575 return block; 1576 return block;
1576} 1577}
1577 1578
@@ -1591,7 +1592,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1591 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); 1592 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1592 if (!rgd) 1593 if (!rgd)
1593 return; 1594 return;
1594 1595 trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
1595 rgd->rd_free += blen; 1596 rgd->rd_free += blen;
1596 1597
1597 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1598 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
@@ -1619,7 +1620,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1619 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); 1620 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1620 if (!rgd) 1621 if (!rgd)
1621 return; 1622 return;
1622 1623 trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
1623 rgd->rd_free += blen; 1624 rgd->rd_free += blen;
1624 1625
1625 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1626 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
@@ -1642,6 +1643,7 @@ void gfs2_unlink_di(struct inode *inode)
1642 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); 1643 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
1643 if (!rgd) 1644 if (!rgd)
1644 return; 1645 return;
1646 trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED);
1645 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1647 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1646 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 1648 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1647 gfs2_trans_add_rg(rgd); 1649 gfs2_trans_add_rg(rgd);
@@ -1673,6 +1675,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1673void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) 1675void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1674{ 1676{
1675 gfs2_free_uninit_di(rgd, ip->i_no_addr); 1677 gfs2_free_uninit_di(rgd, ip->i_no_addr);
1678 trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE);
1676 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); 1679 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1677 gfs2_meta_wipe(ip, ip->i_no_addr, 1); 1680 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
1678} 1681}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c8930b31cdf0..0a6801336470 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -719,8 +719,6 @@ static void gfs2_put_super(struct super_block *sb)
719 int error; 719 int error;
720 struct gfs2_jdesc *jd; 720 struct gfs2_jdesc *jd;
721 721
722 lock_kernel();
723
724 /* Unfreeze the filesystem, if we need to */ 722 /* Unfreeze the filesystem, if we need to */
725 723
726 mutex_lock(&sdp->sd_freeze_lock); 724 mutex_lock(&sdp->sd_freeze_lock);
@@ -787,8 +785,6 @@ restart:
787 785
788 /* At this point, we're through participating in the lockspace */ 786 /* At this point, we're through participating in the lockspace */
789 gfs2_sys_fs_del(sdp); 787 gfs2_sys_fs_del(sdp);
790
791 unlock_kernel();
792} 788}
793 789
794/** 790/**
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
new file mode 100644
index 000000000000..98d6ef1c1dc0
--- /dev/null
+++ b/fs/gfs2/trace_gfs2.h
@@ -0,0 +1,407 @@
1#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_GFS2_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM gfs2
8#define TRACE_INCLUDE_FILE trace_gfs2
9
10#include <linux/fs.h>
11#include <linux/buffer_head.h>
12#include <linux/dlmconstants.h>
13#include <linux/gfs2_ondisk.h>
14#include "incore.h"
15#include "glock.h"
16
17#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
18#define glock_trace_name(x) __print_symbolic(x, \
19 dlm_state_name(IV), \
20 dlm_state_name(NL), \
21 dlm_state_name(CR), \
22 dlm_state_name(CW), \
23 dlm_state_name(PR), \
24 dlm_state_name(PW), \
25 dlm_state_name(EX))
26
27#define block_state_name(x) __print_symbolic(x, \
28 { GFS2_BLKST_FREE, "free" }, \
29 { GFS2_BLKST_USED, "used" }, \
30 { GFS2_BLKST_DINODE, "dinode" }, \
31 { GFS2_BLKST_UNLINKED, "unlinked" })
32
33#define show_glock_flags(flags) __print_flags(flags, "", \
34 {(1UL << GLF_LOCK), "l" }, \
35 {(1UL << GLF_DEMOTE), "D" }, \
36 {(1UL << GLF_PENDING_DEMOTE), "d" }, \
37 {(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
38 {(1UL << GLF_DIRTY), "y" }, \
39 {(1UL << GLF_LFLUSH), "f" }, \
40 {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
41 {(1UL << GLF_REPLY_PENDING), "r" }, \
42 {(1UL << GLF_INITIAL), "I" }, \
43 {(1UL << GLF_FROZEN), "F" })
44
45#ifndef NUMPTY
46#define NUMPTY
47static inline u8 glock_trace_state(unsigned int state)
48{
49 switch(state) {
50 case LM_ST_SHARED:
51 return DLM_LOCK_PR;
52 case LM_ST_DEFERRED:
53 return DLM_LOCK_CW;
54 case LM_ST_EXCLUSIVE:
55 return DLM_LOCK_EX;
56 }
57 return DLM_LOCK_NL;
58}
59#endif
60
61/* Section 1 - Locking
62 *
63 * Objectives:
64 * Latency: Remote demote request to state change
65 * Latency: Local lock request to state change
66 * Latency: State change to lock grant
67 * Correctness: Ordering of local lock state vs. I/O requests
68 * Correctness: Responses to remote demote requests
69 */
70
71/* General glock state change (DLM lock request completes) */
72TRACE_EVENT(gfs2_glock_state_change,
73
74 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
75
76 TP_ARGS(gl, new_state),
77
78 TP_STRUCT__entry(
79 __field( dev_t, dev )
80 __field( u64, glnum )
81 __field( u32, gltype )
82 __field( u8, cur_state )
83 __field( u8, new_state )
84 __field( u8, dmt_state )
85 __field( u8, tgt_state )
86 __field( unsigned long, flags )
87 ),
88
89 TP_fast_assign(
90 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
91 __entry->glnum = gl->gl_name.ln_number;
92 __entry->gltype = gl->gl_name.ln_type;
93 __entry->cur_state = glock_trace_state(gl->gl_state);
94 __entry->new_state = glock_trace_state(new_state);
95 __entry->tgt_state = glock_trace_state(gl->gl_target);
96 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
97 __entry->flags = gl->gl_flags;
98 ),
99
100 TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
101 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
102 (unsigned long long)__entry->glnum,
103 glock_trace_name(__entry->cur_state),
104 glock_trace_name(__entry->new_state),
105 glock_trace_name(__entry->tgt_state),
106 glock_trace_name(__entry->dmt_state),
107 show_glock_flags(__entry->flags))
108);
109
110/* State change -> unlocked, glock is being deallocated */
111TRACE_EVENT(gfs2_glock_put,
112
113 TP_PROTO(const struct gfs2_glock *gl),
114
115 TP_ARGS(gl),
116
117 TP_STRUCT__entry(
118 __field( dev_t, dev )
119 __field( u64, glnum )
120 __field( u32, gltype )
121 __field( u8, cur_state )
122 __field( unsigned long, flags )
123 ),
124
125 TP_fast_assign(
126 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
127 __entry->gltype = gl->gl_name.ln_type;
128 __entry->glnum = gl->gl_name.ln_number;
129 __entry->cur_state = glock_trace_state(gl->gl_state);
130 __entry->flags = gl->gl_flags;
131 ),
132
133 TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
134 MAJOR(__entry->dev), MINOR(__entry->dev),
135 __entry->gltype, (unsigned long long)__entry->glnum,
136 glock_trace_name(__entry->cur_state),
137 glock_trace_name(DLM_LOCK_IV),
138 show_glock_flags(__entry->flags))
139
140);
141
142/* Callback (local or remote) requesting lock demotion */
143TRACE_EVENT(gfs2_demote_rq,
144
145 TP_PROTO(const struct gfs2_glock *gl),
146
147 TP_ARGS(gl),
148
149 TP_STRUCT__entry(
150 __field( dev_t, dev )
151 __field( u64, glnum )
152 __field( u32, gltype )
153 __field( u8, cur_state )
154 __field( u8, dmt_state )
155 __field( unsigned long, flags )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = gl->gl_sbd->sd_vfs->s_dev;
160 __entry->gltype = gl->gl_name.ln_type;
161 __entry->glnum = gl->gl_name.ln_number;
162 __entry->cur_state = glock_trace_state(gl->gl_state);
163 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
164 __entry->flags = gl->gl_flags;
165 ),
166
167 TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
168 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
169 (unsigned long long)__entry->glnum,
170 glock_trace_name(__entry->cur_state),
171 glock_trace_name(__entry->dmt_state),
172 show_glock_flags(__entry->flags))
173
174);
175
176/* Promotion/grant of a glock */
177TRACE_EVENT(gfs2_promote,
178
179 TP_PROTO(const struct gfs2_holder *gh, int first),
180
181 TP_ARGS(gh, first),
182
183 TP_STRUCT__entry(
184 __field( dev_t, dev )
185 __field( u64, glnum )
186 __field( u32, gltype )
187 __field( int, first )
188 __field( u8, state )
189 ),
190
191 TP_fast_assign(
192 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
193 __entry->glnum = gh->gh_gl->gl_name.ln_number;
194 __entry->gltype = gh->gh_gl->gl_name.ln_type;
195 __entry->first = first;
196 __entry->state = glock_trace_state(gh->gh_state);
197 ),
198
199 TP_printk("%u,%u glock %u:%llu promote %s %s",
200 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
201 (unsigned long long)__entry->glnum,
202 __entry->first ? "first": "other",
203 glock_trace_name(__entry->state))
204);
205
206/* Queue/dequeue a lock request */
207TRACE_EVENT(gfs2_glock_queue,
208
209 TP_PROTO(const struct gfs2_holder *gh, int queue),
210
211 TP_ARGS(gh, queue),
212
213 TP_STRUCT__entry(
214 __field( dev_t, dev )
215 __field( u64, glnum )
216 __field( u32, gltype )
217 __field( int, queue )
218 __field( u8, state )
219 ),
220
221 TP_fast_assign(
222 __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
223 __entry->glnum = gh->gh_gl->gl_name.ln_number;
224 __entry->gltype = gh->gh_gl->gl_name.ln_type;
225 __entry->queue = queue;
226 __entry->state = glock_trace_state(gh->gh_state);
227 ),
228
229 TP_printk("%u,%u glock %u:%llu %squeue %s",
230 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
231 (unsigned long long)__entry->glnum,
232 __entry->queue ? "" : "de",
233 glock_trace_name(__entry->state))
234);
235
236/* Section 2 - Log/journal
237 *
238 * Objectives:
239 * Latency: Log flush time
240 * Correctness: pin/unpin vs. disk I/O ordering
241 * Performance: Log usage stats
242 */
243
244/* Pin/unpin a block in the log */
245TRACE_EVENT(gfs2_pin,
246
247 TP_PROTO(const struct gfs2_bufdata *bd, int pin),
248
249 TP_ARGS(bd, pin),
250
251 TP_STRUCT__entry(
252 __field( dev_t, dev )
253 __field( int, pin )
254 __field( u32, len )
255 __field( sector_t, block )
256 __field( u64, ino )
257 ),
258
259 TP_fast_assign(
260 __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
261 __entry->pin = pin;
262 __entry->len = bd->bd_bh->b_size;
263 __entry->block = bd->bd_bh->b_blocknr;
264 __entry->ino = bd->bd_gl->gl_name.ln_number;
265 ),
266
267 TP_printk("%u,%u log %s %llu/%lu inode %llu",
268 MAJOR(__entry->dev), MINOR(__entry->dev),
269 __entry->pin ? "pin" : "unpin",
270 (unsigned long long)__entry->block,
271 (unsigned long)__entry->len,
272 (unsigned long long)__entry->ino)
273);
274
275/* Flushing the log */
276TRACE_EVENT(gfs2_log_flush,
277
278 TP_PROTO(const struct gfs2_sbd *sdp, int start),
279
280 TP_ARGS(sdp, start),
281
282 TP_STRUCT__entry(
283 __field( dev_t, dev )
284 __field( int, start )
285 __field( u64, log_seq )
286 ),
287
288 TP_fast_assign(
289 __entry->dev = sdp->sd_vfs->s_dev;
290 __entry->start = start;
291 __entry->log_seq = sdp->sd_log_sequence;
292 ),
293
294 TP_printk("%u,%u log flush %s %llu",
295 MAJOR(__entry->dev), MINOR(__entry->dev),
296 __entry->start ? "start" : "end",
297 (unsigned long long)__entry->log_seq)
298);
299
300/* Reserving/releasing blocks in the log */
301TRACE_EVENT(gfs2_log_blocks,
302
303 TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
304
305 TP_ARGS(sdp, blocks),
306
307 TP_STRUCT__entry(
308 __field( dev_t, dev )
309 __field( int, blocks )
310 ),
311
312 TP_fast_assign(
313 __entry->dev = sdp->sd_vfs->s_dev;
314 __entry->blocks = blocks;
315 ),
316
317 TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
318 MINOR(__entry->dev), __entry->blocks)
319);
320
321/* Section 3 - bmap
322 *
323 * Objectives:
324 * Latency: Bmap request time
325 * Performance: Block allocator tracing
 326 * Correctness: Test of discard generation vs. blocks allocated
327 */
328
329/* Map an extent of blocks, possibly a new allocation */
330TRACE_EVENT(gfs2_bmap,
331
332 TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
333 sector_t lblock, int create, int errno),
334
335 TP_ARGS(ip, bh, lblock, create, errno),
336
337 TP_STRUCT__entry(
338 __field( dev_t, dev )
339 __field( sector_t, lblock )
340 __field( sector_t, pblock )
341 __field( u64, inum )
342 __field( unsigned long, state )
343 __field( u32, len )
344 __field( int, create )
345 __field( int, errno )
346 ),
347
348 TP_fast_assign(
349 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
350 __entry->lblock = lblock;
351 __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
352 __entry->inum = ip->i_no_addr;
353 __entry->state = bh->b_state;
354 __entry->len = bh->b_size;
355 __entry->create = create;
356 __entry->errno = errno;
357 ),
358
359 TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
360 MAJOR(__entry->dev), MINOR(__entry->dev),
361 (unsigned long long)__entry->inum,
362 (unsigned long long)__entry->lblock,
363 (unsigned long)__entry->len,
364 (unsigned long long)__entry->pblock,
365 __entry->state, __entry->create ? "create " : "nocreate",
366 __entry->errno)
367);
368
369/* Keep track of blocks as they are allocated/freed */
370TRACE_EVENT(gfs2_block_alloc,
371
372 TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
373 u8 block_state),
374
375 TP_ARGS(ip, block, len, block_state),
376
377 TP_STRUCT__entry(
378 __field( dev_t, dev )
379 __field( u64, start )
380 __field( u64, inum )
381 __field( u32, len )
382 __field( u8, block_state )
383 ),
384
385 TP_fast_assign(
386 __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
387 __entry->start = block;
388 __entry->inum = ip->i_no_addr;
389 __entry->len = len;
390 __entry->block_state = block_state;
391 ),
392
393 TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
394 MAJOR(__entry->dev), MINOR(__entry->dev),
395 (unsigned long long)__entry->inum,
396 (unsigned long long)__entry->start,
397 (unsigned long)__entry->len,
398 block_state_name(__entry->block_state))
399);
400
401#endif /* _TRACE_GFS2_H */
402
403/* This part must be outside protection */
404#undef TRACE_INCLUDE_PATH
405#define TRACE_INCLUDE_PATH .
406#include <trace/define_trace.h>
407
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 0af36085eb28..1a9c7878f864 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -556,27 +556,49 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
556 556
557 /* add partitions */ 557 /* add partitions */
558 for (p = 1; p < state->limit; p++) { 558 for (p = 1; p < state->limit; p++) {
559 sector_t size = state->parts[p].size; 559 sector_t size, from;
560 sector_t from = state->parts[p].from; 560try_scan:
561 size = state->parts[p].size;
561 if (!size) 562 if (!size)
562 continue; 563 continue;
564
565 from = state->parts[p].from;
563 if (from >= get_capacity(disk)) { 566 if (from >= get_capacity(disk)) {
564 printk(KERN_WARNING 567 printk(KERN_WARNING
565 "%s: p%d ignored, start %llu is behind the end of the disk\n", 568 "%s: p%d ignored, start %llu is behind the end of the disk\n",
566 disk->disk_name, p, (unsigned long long) from); 569 disk->disk_name, p, (unsigned long long) from);
567 continue; 570 continue;
568 } 571 }
572
569 if (from + size > get_capacity(disk)) { 573 if (from + size > get_capacity(disk)) {
570 /* 574 struct block_device_operations *bdops = disk->fops;
571 * we can not ignore partitions of broken tables 575 unsigned long long capacity;
572 * created by for example camera firmware, but we 576
573 * limit them to the end of the disk to avoid
574 * creating invalid block devices
575 */
576 printk(KERN_WARNING 577 printk(KERN_WARNING
577 "%s: p%d size %llu limited to end of disk\n", 578 "%s: p%d size %llu exceeds device capacity, ",
578 disk->disk_name, p, (unsigned long long) size); 579 disk->disk_name, p, (unsigned long long) size);
579 size = get_capacity(disk) - from; 580
581 if (bdops->set_capacity &&
582 (disk->flags & GENHD_FL_NATIVE_CAPACITY) == 0) {
583 printk(KERN_CONT "enabling native capacity\n");
584 capacity = bdops->set_capacity(disk, ~0ULL);
585 disk->flags |= GENHD_FL_NATIVE_CAPACITY;
586 if (capacity > get_capacity(disk)) {
587 set_capacity(disk, capacity);
588 check_disk_size_change(disk, bdev);
589 bdev->bd_invalidated = 0;
590 }
591 goto try_scan;
592 } else {
593 /*
594 * we can not ignore partitions of broken tables
595 * created by for example camera firmware, but
596 * we limit them to the end of the disk to avoid
597 * creating invalid block devices
598 */
599 printk(KERN_CONT "limited to end of disk\n");
600 size = get_capacity(disk) - from;
601 }
580 } 602 }
581 part = add_partition(disk, p, from, size, 603 part = add_partition(disk, p, from, size,
582 state->parts[p].flags); 604 state->parts[p].flags);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b84d8ae35e6f..d4ddc22e46bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -86,7 +86,17 @@ struct drm_device;
86 86
87#include "drm_os_linux.h" 87#include "drm_os_linux.h"
88#include "drm_hashtab.h" 88#include "drm_hashtab.h"
89#include "drm_mm.h"
89 90
91#define DRM_UT_CORE 0x01
92#define DRM_UT_DRIVER 0x02
93#define DRM_UT_KMS 0x04
94#define DRM_UT_MODE 0x08
95
96extern void drm_ut_debug_printk(unsigned int request_level,
97 const char *prefix,
98 const char *function_name,
99 const char *format, ...);
90/***********************************************************************/ 100/***********************************************************************/
91/** \name DRM template customization defaults */ 101/** \name DRM template customization defaults */
92/*@{*/ 102/*@{*/
@@ -186,15 +196,57 @@ struct drm_device;
186 * \param arg arguments 196 * \param arg arguments
187 */ 197 */
188#if DRM_DEBUG_CODE 198#if DRM_DEBUG_CODE
189#define DRM_DEBUG(fmt, arg...) \ 199#define DRM_DEBUG(fmt, args...) \
200 do { \
201 drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
202 __func__, fmt, ##args); \
203 } while (0)
204
205#define DRM_DEBUG_DRIVER(prefix, fmt, args...) \
206 do { \
207 drm_ut_debug_printk(DRM_UT_DRIVER, prefix, \
208 __func__, fmt, ##args); \
209 } while (0)
210#define DRM_DEBUG_KMS(prefix, fmt, args...) \
211 do { \
212 drm_ut_debug_printk(DRM_UT_KMS, prefix, \
213 __func__, fmt, ##args); \
214 } while (0)
215#define DRM_DEBUG_MODE(prefix, fmt, args...) \
216 do { \
217 drm_ut_debug_printk(DRM_UT_MODE, prefix, \
218 __func__, fmt, ##args); \
219 } while (0)
220#define DRM_LOG(fmt, args...) \
221 do { \
222 drm_ut_debug_printk(DRM_UT_CORE, NULL, \
223 NULL, fmt, ##args); \
224 } while (0)
225#define DRM_LOG_KMS(fmt, args...) \
226 do { \
227 drm_ut_debug_printk(DRM_UT_KMS, NULL, \
228 NULL, fmt, ##args); \
229 } while (0)
230#define DRM_LOG_MODE(fmt, args...) \
231 do { \
232 drm_ut_debug_printk(DRM_UT_MODE, NULL, \
233 NULL, fmt, ##args); \
234 } while (0)
235#define DRM_LOG_DRIVER(fmt, args...) \
190 do { \ 236 do { \
191 if ( drm_debug ) \ 237 drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
192 printk(KERN_DEBUG \ 238 NULL, fmt, ##args); \
193 "[" DRM_NAME ":%s] " fmt , \
194 __func__ , ##arg); \
195 } while (0) 239 } while (0)
196#else 240#else
241#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0)
242#define DRM_DEBUG_KMS(prefix, fmt, args...) do { } while (0)
243#define DRM_DEBUG_MODE(prefix, fmt, args...) do { } while (0)
197#define DRM_DEBUG(fmt, arg...) do { } while (0) 244#define DRM_DEBUG(fmt, arg...) do { } while (0)
245#define DRM_LOG(fmt, arg...) do { } while (0)
246#define DRM_LOG_KMS(fmt, args...) do { } while (0)
247#define DRM_LOG_MODE(fmt, arg...) do { } while (0)
248#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
249
198#endif 250#endif
199 251
200#define DRM_PROC_LIMIT (PAGE_SIZE-80) 252#define DRM_PROC_LIMIT (PAGE_SIZE-80)
@@ -237,15 +289,15 @@ struct drm_device;
237 * \param dev DRM device. 289 * \param dev DRM device.
238 * \param filp file pointer of the caller. 290 * \param filp file pointer of the caller.
239 */ 291 */
240#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ 292#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
241do { \ 293do { \
242 if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) || \ 294 if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
243 file_priv->master->lock.file_priv != file_priv) { \ 295 _file_priv->master->lock.file_priv != _file_priv) { \
244 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 296 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
245 __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\ 297 __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
246 file_priv->master->lock.file_priv, file_priv); \ 298 _file_priv->master->lock.file_priv, _file_priv); \
247 return -EINVAL; \ 299 return -EINVAL; \
248 } \ 300 } \
249} while (0) 301} while (0)
250 302
251/** 303/**
@@ -502,26 +554,6 @@ struct drm_sigdata {
502}; 554};
503 555
504 556
505/*
506 * Generic memory manager structs
507 */
508
509struct drm_mm_node {
510 struct list_head fl_entry;
511 struct list_head ml_entry;
512 int free;
513 unsigned long start;
514 unsigned long size;
515 struct drm_mm *mm;
516 void *private;
517};
518
519struct drm_mm {
520 struct list_head fl_entry;
521 struct list_head ml_entry;
522};
523
524
525/** 557/**
526 * Kernel side of a mapping 558 * Kernel side of a mapping
527 */ 559 */
@@ -1385,22 +1417,6 @@ extern char *drm_get_connector_status_name(enum drm_connector_status status);
1385extern int drm_sysfs_connector_add(struct drm_connector *connector); 1417extern int drm_sysfs_connector_add(struct drm_connector *connector);
1386extern void drm_sysfs_connector_remove(struct drm_connector *connector); 1418extern void drm_sysfs_connector_remove(struct drm_connector *connector);
1387 1419
1388/*
1389 * Basic memory manager support (drm_mm.c)
1390 */
1391extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
1392 unsigned long size,
1393 unsigned alignment);
1394extern void drm_mm_put_block(struct drm_mm_node * cur);
1395extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
1396 unsigned alignment, int best_match);
1397extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
1398extern void drm_mm_takedown(struct drm_mm *mm);
1399extern int drm_mm_clean(struct drm_mm *mm);
1400extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
1401extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
1402extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
1403
1404/* Graphics Execution Manager library functions (drm_gem.c) */ 1420/* Graphics Execution Manager library functions (drm_gem.c) */
1405int drm_gem_init(struct drm_device *dev); 1421int drm_gem_init(struct drm_device *dev);
1406void drm_gem_destroy(struct drm_device *dev); 1422void drm_gem_destroy(struct drm_device *dev);
@@ -1522,18 +1538,14 @@ static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
1522 1538
1523static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) 1539static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
1524{ 1540{
1525 u8 *addr; 1541 if (size * nmemb <= PAGE_SIZE)
1526
1527 if (size <= PAGE_SIZE)
1528 return kcalloc(nmemb, size, GFP_KERNEL); 1542 return kcalloc(nmemb, size, GFP_KERNEL);
1529 1543
1530 addr = vmalloc(nmemb * size); 1544 if (size != 0 && nmemb > ULONG_MAX / size)
1531 if (!addr)
1532 return NULL; 1545 return NULL;
1533 1546
1534 memset(addr, 0, nmemb * size); 1547 return __vmalloc(size * nmemb,
1535 1548 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
1536 return addr;
1537} 1549}
1538 1550
1539static __inline void drm_free_large(void *ptr) 1551static __inline void drm_free_large(void *ptr)
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index cd2b189e1be6..0af087a4d3b3 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -35,6 +35,8 @@
35#ifndef DRM_HASHTAB_H 35#ifndef DRM_HASHTAB_H
36#define DRM_HASHTAB_H 36#define DRM_HASHTAB_H
37 37
38#include <linux/list.h>
39
38#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) 40#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
39 41
40struct drm_hash_item { 42struct drm_hash_item {
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
new file mode 100644
index 000000000000..5662f4278ef3
--- /dev/null
+++ b/include/drm/drm_mm.h
@@ -0,0 +1,90 @@
1/**************************************************************************
2 *
3 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Authors:
30 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31 */
32
33#ifndef _DRM_MM_H_
34#define _DRM_MM_H_
35
36/*
37 * Generic range manager structs
38 */
39#include <linux/list.h>
40
41struct drm_mm_node {
42 struct list_head fl_entry;
43 struct list_head ml_entry;
44 int free;
45 unsigned long start;
46 unsigned long size;
47 struct drm_mm *mm;
48 void *private;
49};
50
51struct drm_mm {
52 struct list_head fl_entry;
53 struct list_head ml_entry;
54 struct list_head unused_nodes;
55 int num_unused;
56 spinlock_t unused_lock;
57};
58
59/*
60 * Basic range manager support (drm_mm.c)
61 */
62
63extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
64 unsigned long size,
65 unsigned alignment);
66extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
67 unsigned long size,
68 unsigned alignment);
69extern void drm_mm_put_block(struct drm_mm_node *cur);
70extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
71 unsigned long size,
72 unsigned alignment,
73 int best_match);
74extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
75 unsigned long size);
76extern void drm_mm_takedown(struct drm_mm *mm);
77extern int drm_mm_clean(struct drm_mm *mm);
78extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
79extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
80 unsigned long size);
81extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
82 unsigned long size, int atomic);
83extern int drm_mm_pre_get(struct drm_mm *mm);
84
85static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
86{
87 return block->mm;
88}
89
90#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fc55db780199..f8634ab53b8f 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -254,6 +254,11 @@
254 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ 254 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
255 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ 255 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
256 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ 256 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
257 {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
258 {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
259 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
260 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
261 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
257 {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 262 {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
258 {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 263 {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
259 {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 264 {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -268,6 +273,8 @@
268 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 273 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
269 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 274 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
270 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 275 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
276 {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
277 {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
271 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 278 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
272 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 279 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
273 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 280 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -536,4 +543,6 @@
536 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 543 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
537 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 544 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
538 {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 545 {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
546 {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
547 {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
539 {0, 0, 0} 548 {0, 0, 0}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ebdfde8fe556..0b1a6cae9de1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1226,6 +1226,8 @@ struct block_device_operations {
1226 int (*direct_access) (struct block_device *, sector_t, 1226 int (*direct_access) (struct block_device *, sector_t,
1227 void **, unsigned long *); 1227 void **, unsigned long *);
1228 int (*media_changed) (struct gendisk *); 1228 int (*media_changed) (struct gendisk *);
1229 unsigned long long (*set_capacity) (struct gendisk *,
1230 unsigned long long);
1229 int (*revalidate_disk) (struct gendisk *); 1231 int (*revalidate_disk) (struct gendisk *);
1230 int (*getgeo)(struct block_device *, struct hd_geometry *); 1232 int (*getgeo)(struct block_device *, struct hd_geometry *);
1231 struct module *owner; 1233 struct module *owner;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 37bcb50a4d7c..04fb5135b4e1 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -261,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
261# define __section(S) __attribute__ ((__section__(#S))) 261# define __section(S) __attribute__ ((__section__(#S)))
262#endif 262#endif
263 263
264/* Are two types/vars the same type (ignoring qualifiers)? */
265#ifndef __same_type
266# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
267#endif
268
264/* 269/*
265 * Prevent the compiler from merging or refetching accesses. The compiler 270 * Prevent the compiler from merging or refetching accesses. The compiler
266 * is also forbidden from reordering successive instances of ACCESS_ONCE(), 271 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d5c197bad45..a4a7b10aaa48 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -62,8 +62,6 @@ struct bus_type {
62 void (*shutdown)(struct device *dev); 62 void (*shutdown)(struct device *dev);
63 63
64 int (*suspend)(struct device *dev, pm_message_t state); 64 int (*suspend)(struct device *dev, pm_message_t state);
65 int (*suspend_late)(struct device *dev, pm_message_t state);
66 int (*resume_early)(struct device *dev);
67 int (*resume)(struct device *dev); 65 int (*resume)(struct device *dev);
68 66
69 struct dev_pm_ops *pm; 67 struct dev_pm_ops *pm;
@@ -291,9 +289,6 @@ struct device_type {
291 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 289 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
292 void (*release)(struct device *dev); 290 void (*release)(struct device *dev);
293 291
294 int (*suspend)(struct device *dev, pm_message_t state);
295 int (*resume)(struct device *dev);
296
297 struct dev_pm_ops *pm; 292 struct dev_pm_ops *pm;
298}; 293};
299 294
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index b9cd38603fd8..0b3518c42356 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -81,8 +81,8 @@ struct dlm_lksb {
81 * the cluster, the calling node joins it. 81 * the cluster, the calling node joins it.
82 */ 82 */
83 83
84int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace, 84int dlm_new_lockspace(const char *name, int namelen,
85 uint32_t flags, int lvblen); 85 dlm_lockspace_t **lockspace, uint32_t flags, int lvblen);
86 86
87/* 87/*
88 * dlm_release_lockspace 88 * dlm_release_lockspace
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 162e5defe683..d41ed593f79f 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -121,6 +121,13 @@ struct fuse_file_lock {
121#define FUSE_BIG_WRITES (1 << 5) 121#define FUSE_BIG_WRITES (1 << 5)
122 122
123/** 123/**
124 * CUSE INIT request/reply flags
125 *
126 * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl
127 */
128#define CUSE_UNRESTRICTED_IOCTL (1 << 0)
129
130/**
124 * Release flags 131 * Release flags
125 */ 132 */
126#define FUSE_RELEASE_FLUSH (1 << 0) 133#define FUSE_RELEASE_FLUSH (1 << 0)
@@ -210,6 +217,9 @@ enum fuse_opcode {
210 FUSE_DESTROY = 38, 217 FUSE_DESTROY = 38,
211 FUSE_IOCTL = 39, 218 FUSE_IOCTL = 39,
212 FUSE_POLL = 40, 219 FUSE_POLL = 40,
220
221 /* CUSE specific operations */
222 CUSE_INIT = 4096,
213}; 223};
214 224
215enum fuse_notify_code { 225enum fuse_notify_code {
@@ -401,6 +411,27 @@ struct fuse_init_out {
401 __u32 max_write; 411 __u32 max_write;
402}; 412};
403 413
414#define CUSE_INIT_INFO_MAX 4096
415
416struct cuse_init_in {
417 __u32 major;
418 __u32 minor;
419 __u32 unused;
420 __u32 flags;
421};
422
423struct cuse_init_out {
424 __u32 major;
425 __u32 minor;
426 __u32 unused;
427 __u32 flags;
428 __u32 max_read;
429 __u32 max_write;
430 __u32 dev_major; /* chardev major */
431 __u32 dev_minor; /* chardev minor */
432 __u32 spare[10];
433};
434
404struct fuse_interrupt_in { 435struct fuse_interrupt_in {
405 __u64 unique; 436 __u64 unique;
406}; 437};
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 149fda264c86..7cbd38d363a2 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -114,6 +114,7 @@ struct hd_struct {
114#define GENHD_FL_UP 16 114#define GENHD_FL_UP 16
115#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 115#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
116#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ 116#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
117#define GENHD_FL_NATIVE_CAPACITY 128
117 118
118#define BLK_SCSI_MAX_CMDS (256) 119#define BLK_SCSI_MAX_CMDS (256)
119#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) 120#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f54536..3760e7c5de02 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -85,6 +85,9 @@ struct vm_area_struct;
85 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ 85 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
86 __GFP_NORETRY|__GFP_NOMEMALLOC) 86 __GFP_NORETRY|__GFP_NOMEMALLOC)
87 87
88/* Control slab gfp mask during early boot */
89#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
90
88/* Control allocation constraints */ 91/* Control allocation constraints */
89#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) 92#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
90 93
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 867cb68d8461..a6c6a2fad7c8 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -178,7 +178,7 @@ typedef u8 hwif_chipset_t;
178/* 178/*
179 * Structure to hold all information about the location of this port 179 * Structure to hold all information about the location of this port
180 */ 180 */
181typedef struct hw_regs_s { 181struct ide_hw {
182 union { 182 union {
183 struct ide_io_ports io_ports; 183 struct ide_io_ports io_ports;
184 unsigned long io_ports_array[IDE_NR_PORTS]; 184 unsigned long io_ports_array[IDE_NR_PORTS];
@@ -186,12 +186,11 @@ typedef struct hw_regs_s {
186 186
187 int irq; /* our irq number */ 187 int irq; /* our irq number */
188 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ 188 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
189 hwif_chipset_t chipset;
190 struct device *dev, *parent; 189 struct device *dev, *parent;
191 unsigned long config; 190 unsigned long config;
192} hw_regs_t; 191};
193 192
194static inline void ide_std_init_ports(hw_regs_t *hw, 193static inline void ide_std_init_ports(struct ide_hw *hw,
195 unsigned long io_addr, 194 unsigned long io_addr,
196 unsigned long ctl_addr) 195 unsigned long ctl_addr)
197{ 196{
@@ -218,21 +217,12 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
218 217
219/* 218/*
220 * Special Driver Flags 219 * Special Driver Flags
221 *
222 * set_geometry : respecify drive geometry
223 * recalibrate : seek to cyl 0
224 * set_multmode : set multmode count
225 * reserved : unused
226 */ 220 */
227typedef union { 221enum {
228 unsigned all : 8; 222 IDE_SFLAG_SET_GEOMETRY = (1 << 0),
229 struct { 223 IDE_SFLAG_RECALIBRATE = (1 << 1),
230 unsigned set_geometry : 1; 224 IDE_SFLAG_SET_MULTMODE = (1 << 2),
231 unsigned recalibrate : 1; 225};
232 unsigned set_multmode : 1;
233 unsigned reserved : 5;
234 } b;
235} special_t;
236 226
237/* 227/*
238 * Status returned from various ide_ functions 228 * Status returned from various ide_ functions
@@ -391,6 +381,7 @@ struct ide_drive_s;
391struct ide_disk_ops { 381struct ide_disk_ops {
392 int (*check)(struct ide_drive_s *, const char *); 382 int (*check)(struct ide_drive_s *, const char *);
393 int (*get_capacity)(struct ide_drive_s *); 383 int (*get_capacity)(struct ide_drive_s *);
384 u64 (*set_capacity)(struct ide_drive_s *, u64);
394 void (*setup)(struct ide_drive_s *); 385 void (*setup)(struct ide_drive_s *);
395 void (*flush)(struct ide_drive_s *); 386 void (*flush)(struct ide_drive_s *);
396 int (*init_media)(struct ide_drive_s *, struct gendisk *); 387 int (*init_media)(struct ide_drive_s *, struct gendisk *);
@@ -468,6 +459,8 @@ enum {
468 IDE_DFLAG_NICE1 = (1 << 5), 459 IDE_DFLAG_NICE1 = (1 << 5),
469 /* device is physically present */ 460 /* device is physically present */
470 IDE_DFLAG_PRESENT = (1 << 6), 461 IDE_DFLAG_PRESENT = (1 << 6),
462 /* disable Host Protected Area */
463 IDE_DFLAG_NOHPA = (1 << 7),
471 /* id read from device (synthetic if not set) */ 464 /* id read from device (synthetic if not set) */
472 IDE_DFLAG_ID_READ = (1 << 8), 465 IDE_DFLAG_ID_READ = (1 << 8),
473 IDE_DFLAG_NOPROBE = (1 << 9), 466 IDE_DFLAG_NOPROBE = (1 << 9),
@@ -506,6 +499,7 @@ enum {
506 /* write protect */ 499 /* write protect */
507 IDE_DFLAG_WP = (1 << 29), 500 IDE_DFLAG_WP = (1 << 29),
508 IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), 501 IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
502 IDE_DFLAG_NIEN_QUIRK = (1 << 31),
509}; 503};
510 504
511struct ide_drive_s { 505struct ide_drive_s {
@@ -530,14 +524,13 @@ struct ide_drive_s {
530 unsigned long sleep; /* sleep until this time */ 524 unsigned long sleep; /* sleep until this time */
531 unsigned long timeout; /* max time to wait for irq */ 525 unsigned long timeout; /* max time to wait for irq */
532 526
533 special_t special; /* special action flags */ 527 u8 special_flags; /* special action flags */
534 528
535 u8 select; /* basic drive/head select reg value */ 529 u8 select; /* basic drive/head select reg value */
536 u8 retry_pio; /* retrying dma capable host in pio */ 530 u8 retry_pio; /* retrying dma capable host in pio */
537 u8 waiting_for_dma; /* dma currently in progress */ 531 u8 waiting_for_dma; /* dma currently in progress */
538 u8 dma; /* atapi dma flag */ 532 u8 dma; /* atapi dma flag */
539 533
540 u8 quirk_list; /* considered quirky, set for a specific host */
541 u8 init_speed; /* transfer rate set at boot */ 534 u8 init_speed; /* transfer rate set at boot */
542 u8 current_speed; /* current transfer rate set */ 535 u8 current_speed; /* current transfer rate set */
543 u8 desired_speed; /* desired transfer rate set */ 536 u8 desired_speed; /* desired transfer rate set */
@@ -562,8 +555,7 @@ struct ide_drive_s {
562 unsigned int drive_data; /* used by set_pio_mode/dev_select() */ 555 unsigned int drive_data; /* used by set_pio_mode/dev_select() */
563 unsigned int failures; /* current failure count */ 556 unsigned int failures; /* current failure count */
564 unsigned int max_failures; /* maximum allowed failure count */ 557 unsigned int max_failures; /* maximum allowed failure count */
565 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ 558 u64 probed_capacity;/* initial/native media capacity */
566
567 u64 capacity64; /* total number of sectors */ 559 u64 capacity64; /* total number of sectors */
568 560
569 int lun; /* logical unit */ 561 int lun; /* logical unit */
@@ -1222,7 +1214,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
1222} 1214}
1223 1215
1224void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, 1216void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
1225 hw_regs_t *, hw_regs_t **); 1217 struct ide_hw *, struct ide_hw **);
1226void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); 1218void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1227 1219
1228#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 1220#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -1461,16 +1453,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1461void ide_register_region(struct gendisk *); 1453void ide_register_region(struct gendisk *);
1462void ide_unregister_region(struct gendisk *); 1454void ide_unregister_region(struct gendisk *);
1463 1455
1456void ide_check_nien_quirk_list(ide_drive_t *);
1464void ide_undecoded_slave(ide_drive_t *); 1457void ide_undecoded_slave(ide_drive_t *);
1465 1458
1466void ide_port_apply_params(ide_hwif_t *); 1459void ide_port_apply_params(ide_hwif_t *);
1467int ide_sysfs_register_port(ide_hwif_t *); 1460int ide_sysfs_register_port(ide_hwif_t *);
1468 1461
1469struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); 1462struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
1463 unsigned int);
1470void ide_host_free(struct ide_host *); 1464void ide_host_free(struct ide_host *);
1471int ide_host_register(struct ide_host *, const struct ide_port_info *, 1465int ide_host_register(struct ide_host *, const struct ide_port_info *,
1472 hw_regs_t **); 1466 struct ide_hw **);
1473int ide_host_add(const struct ide_port_info *, hw_regs_t **, 1467int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
1474 struct ide_host **); 1468 struct ide_host **);
1475void ide_host_remove(struct ide_host *); 1469void ide_host_remove(struct ide_host *);
1476int ide_legacy_device_add(const struct ide_port_info *, unsigned long); 1470int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index cfe4fe1b7132..60e8934d10b5 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
80#define ETH_P_TIPC 0x88CA /* TIPC */ 80#define ETH_P_TIPC 0x88CA /* TIPC */
81#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ 81#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
82#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
82#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ 83#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
83 84
84/* 85/*
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ff374ceface0..c41e812e9d5e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -183,6 +183,7 @@ extern void disable_irq(unsigned int irq);
183extern void enable_irq(unsigned int irq); 183extern void enable_irq(unsigned int irq);
184 184
185/* The following three functions are for the core kernel use only. */ 185/* The following three functions are for the core kernel use only. */
186#ifdef CONFIG_GENERIC_HARDIRQS
186extern void suspend_device_irqs(void); 187extern void suspend_device_irqs(void);
187extern void resume_device_irqs(void); 188extern void resume_device_irqs(void);
188#ifdef CONFIG_PM_SLEEP 189#ifdef CONFIG_PM_SLEEP
@@ -190,6 +191,11 @@ extern int check_wakeup_irqs(void);
190#else 191#else
191static inline int check_wakeup_irqs(void) { return 0; } 192static inline int check_wakeup_irqs(void) { return 0; }
192#endif 193#endif
194#else
195static inline void suspend_device_irqs(void) { };
196static inline void resume_device_irqs(void) { };
197static inline int check_wakeup_irqs(void) { return 0; }
198#endif
193 199
194#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 200#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
195 201
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 175e63f4a8c0..7bc1440fc473 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -30,6 +30,10 @@ struct lguest_data
30 /* Wallclock time set by the Host. */ 30 /* Wallclock time set by the Host. */
31 struct timespec time; 31 struct timespec time;
32 32
33 /* Interrupt pending set by the Host. The Guest should do a hypercall
34 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
35 int irq_pending;
36
33 /* Async hypercall ring. Instead of directly making hypercalls, we can 37 /* Async hypercall ring. Instead of directly making hypercalls, we can
34 * place them in here for processing the next time the Host wants. 38 * place them in here for processing the next time the Host wants.
35 * This batching can be quite efficient. */ 39 * This batching can be quite efficient. */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index a53407a4165c..bfefbdf7498a 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -57,7 +57,8 @@ enum lguest_req
57 LHREQ_INITIALIZE, /* + base, pfnlimit, start */ 57 LHREQ_INITIALIZE, /* + base, pfnlimit, start */
58 LHREQ_GETDMA, /* No longer used */ 58 LHREQ_GETDMA, /* No longer used */
59 LHREQ_IRQ, /* + irq */ 59 LHREQ_IRQ, /* + irq */
60 LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ 60 LHREQ_BREAK, /* No longer used */
61 LHREQ_EVENTFD, /* + address, fd. */
61}; 62};
62 63
63/* The alignment to use between consumer and producer parts of vring. 64/* The alignment to use between consumer and producer parts of vring.
diff --git a/include/linux/module.h b/include/linux/module.h
index a8f2c0aa4c32..a7bc6e7b43a7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -77,6 +77,7 @@ search_extable(const struct exception_table_entry *first,
77void sort_extable(struct exception_table_entry *start, 77void sort_extable(struct exception_table_entry *start,
78 struct exception_table_entry *finish); 78 struct exception_table_entry *finish);
79void sort_main_extable(void); 79void sort_main_extable(void);
80void trim_init_extable(struct module *m);
80 81
81#ifdef MODULE 82#ifdef MODULE
82#define MODULE_GENERIC_TABLE(gtype,name) \ 83#define MODULE_GENERIC_TABLE(gtype,name) \
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index a4f0b931846c..6547c3cdbc4c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
36/* Returns length written or -errno. Buffer is 4k (ie. be short!) */ 36/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
37typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); 37typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
38 38
39/* Flag bits for kernel_param.flags */
40#define KPARAM_KMALLOCED 1
41#define KPARAM_ISBOOL 2
42
39struct kernel_param { 43struct kernel_param {
40 const char *name; 44 const char *name;
41 unsigned int perm; 45 u16 perm;
46 u16 flags;
42 param_set_fn set; 47 param_set_fn set;
43 param_get_fn get; 48 param_get_fn get;
44 union { 49 union {
@@ -79,7 +84,7 @@ struct kparam_array
79 parameters. perm sets the visibility in sysfs: 000 means it's 84 parameters. perm sets the visibility in sysfs: 000 means it's
80 not there, read bits mean it's readable, write bits mean it's 85 not there, read bits mean it's readable, write bits mean it's
81 writable. */ 86 writable. */
82#define __module_param_call(prefix, name, set, get, arg, perm) \ 87#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \
83 /* Default value instead of permissions? */ \ 88 /* Default value instead of permissions? */ \
84 static int __param_perm_check_##name __attribute__((unused)) = \ 89 static int __param_perm_check_##name __attribute__((unused)) = \
85 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ 90 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
@@ -88,10 +93,13 @@ struct kparam_array
88 static struct kernel_param __moduleparam_const __param_##name \ 93 static struct kernel_param __moduleparam_const __param_##name \
89 __used \ 94 __used \
90 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ 95 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
91 = { __param_str_##name, perm, set, get, { arg } } 96 = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \
97 set, get, { arg } }
92 98
93#define module_param_call(name, set, get, arg, perm) \ 99#define module_param_call(name, set, get, arg, perm) \
94 __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm) 100 __module_param_call(MODULE_PARAM_PREFIX, \
101 name, set, get, arg, \
102 __same_type(*(arg), bool), perm)
95 103
96/* Helper functions: type is byte, short, ushort, int, uint, long, 104/* Helper functions: type is byte, short, ushort, int, uint, long,
97 ulong, charp, bool or invbool, or XXX if you define param_get_XXX, 105 ulong, charp, bool or invbool, or XXX if you define param_get_XXX,
@@ -120,15 +128,16 @@ struct kparam_array
120#define core_param(name, var, type, perm) \ 128#define core_param(name, var, type, perm) \
121 param_check_##type(name, &(var)); \ 129 param_check_##type(name, &(var)); \
122 __module_param_call("", name, param_set_##type, param_get_##type, \ 130 __module_param_call("", name, param_set_##type, param_get_##type, \
123 &var, perm) 131 &var, __same_type(var, bool), perm)
124#endif /* !MODULE */ 132#endif /* !MODULE */
125 133
126/* Actually copy string: maxlen param is usually sizeof(string). */ 134/* Actually copy string: maxlen param is usually sizeof(string). */
127#define module_param_string(name, string, len, perm) \ 135#define module_param_string(name, string, len, perm) \
128 static const struct kparam_string __param_string_##name \ 136 static const struct kparam_string __param_string_##name \
129 = { len, string }; \ 137 = { len, string }; \
130 module_param_call(name, param_set_copystring, param_get_string, \ 138 __module_param_call(MODULE_PARAM_PREFIX, name, \
131 .str = &__param_string_##name, perm); \ 139 param_set_copystring, param_get_string, \
140 .str = &__param_string_##name, 0, perm); \
132 __MODULE_PARM_TYPE(name, "string") 141 __MODULE_PARM_TYPE(name, "string")
133 142
134/* Called on module insert or kernel boot */ 143/* Called on module insert or kernel boot */
@@ -186,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp);
186extern int param_get_charp(char *buffer, struct kernel_param *kp); 195extern int param_get_charp(char *buffer, struct kernel_param *kp);
187#define param_check_charp(name, p) __param_check(name, p, char *) 196#define param_check_charp(name, p) __param_check(name, p, char *)
188 197
198/* For historical reasons "bool" parameters can be (unsigned) "int". */
189extern int param_set_bool(const char *val, struct kernel_param *kp); 199extern int param_set_bool(const char *val, struct kernel_param *kp);
190extern int param_get_bool(char *buffer, struct kernel_param *kp); 200extern int param_get_bool(char *buffer, struct kernel_param *kp);
191#define param_check_bool(name, p) __param_check(name, p, int) 201#define param_check_bool(name, p) \
202 static inline void __check_##name(void) \
203 { \
204 BUILD_BUG_ON(!__same_type(*(p), bool) && \
205 !__same_type(*(p), unsigned int) && \
206 !__same_type(*(p), int)); \
207 }
192 208
193extern int param_set_invbool(const char *val, struct kernel_param *kp); 209extern int param_set_invbool(const char *val, struct kernel_param *kp);
194extern int param_get_invbool(char *buffer, struct kernel_param *kp); 210extern int param_get_invbool(char *buffer, struct kernel_param *kp);
195#define param_check_invbool(name, p) __param_check(name, p, int) 211#define param_check_invbool(name, p) __param_check(name, p, bool)
196 212
197/* Comma-separated array: *nump is set to number they actually specified. */ 213/* Comma-separated array: *nump is set to number they actually specified. */
198#define module_param_array_named(name, array, type, nump, perm) \ 214#define module_param_array_named(name, array, type, nump, perm) \
199 static const struct kparam_array __param_arr_##name \ 215 static const struct kparam_array __param_arr_##name \
200 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ 216 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
201 sizeof(array[0]), array }; \ 217 sizeof(array[0]), array }; \
202 module_param_call(name, param_array_set, param_array_get, \ 218 __module_param_call(MODULE_PARAM_PREFIX, name, \
203 .arr = &__param_arr_##name, perm); \ 219 param_array_set, param_array_get, \
220 .arr = &__param_arr_##name, \
221 __same_type(array[0], bool), perm); \
204 __MODULE_PARM_TYPE(name, "array of " #type) 222 __MODULE_PARM_TYPE(name, "array of " #type)
205 223
206#define module_param_array(name, type, nump, perm) \ 224#define module_param_array(name, type, nump, perm) \
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 7339c7bf7331..13f126c89ae8 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
18}; 18};
19 19
20void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); 20void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
21void __init page_cgroup_init(void); 21
22#ifdef CONFIG_SPARSEMEM
23static inline void __init page_cgroup_init_flatmem(void)
24{
25}
26extern void __init page_cgroup_init(void);
27#else
28void __init page_cgroup_init_flatmem(void);
29static inline void __init page_cgroup_init(void)
30{
31}
32#endif
33
22struct page_cgroup *lookup_page_cgroup(struct page *page); 34struct page_cgroup *lookup_page_cgroup(struct page *page);
23 35
24enum { 36enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
87{ 99{
88} 100}
89 101
102static inline void __init page_cgroup_init_flatmem(void)
103{
104}
105
90#endif 106#endif
91 107
92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 108#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d7d1c41a0b17..19f8e6d1a4d2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1005,6 +1005,7 @@
1005#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 1005#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
1006#define PCI_DEVICE_ID_PLX_9030 0x9030 1006#define PCI_DEVICE_ID_PLX_9030 0x9030
1007#define PCI_DEVICE_ID_PLX_9050 0x9050 1007#define PCI_DEVICE_ID_PLX_9050 0x9050
1008#define PCI_DEVICE_ID_PLX_9056 0x9056
1008#define PCI_DEVICE_ID_PLX_9080 0x9080 1009#define PCI_DEVICE_ID_PLX_9080 0x9080
1009#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 1010#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
1010 1011
@@ -1314,6 +1315,13 @@
1314 1315
1315#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ 1316#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
1316#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 1317#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
1318#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005
1319#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b
1320#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024
1321#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041
1322#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042
1323#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043
1324#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000
1317 1325
1318#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ 1326#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
1319#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 1327#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
@@ -1847,6 +1855,10 @@
1847#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 1855#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
1848#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 1856#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
1849 1857
1858#define PCI_VENDOR_ID_DIGIGRAM 0x1369
1859#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
1860#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
1861
1850#define PCI_VENDOR_ID_KAWASAKI 0x136b 1862#define PCI_VENDOR_ID_KAWASAKI 0x136b
1851#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 1863#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
1852 1864
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 6e133954e2e4..1b3118a1023a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -120,6 +120,8 @@ enum perf_counter_sample_format {
120 PERF_SAMPLE_ID = 1U << 6, 120 PERF_SAMPLE_ID = 1U << 6,
121 PERF_SAMPLE_CPU = 1U << 7, 121 PERF_SAMPLE_CPU = 1U << 7,
122 PERF_SAMPLE_PERIOD = 1U << 8, 122 PERF_SAMPLE_PERIOD = 1U << 8,
123
124 PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */
123}; 125};
124 126
125/* 127/*
@@ -131,17 +133,26 @@ enum perf_counter_read_format {
131 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, 133 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
132 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, 134 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
133 PERF_FORMAT_ID = 1U << 2, 135 PERF_FORMAT_ID = 1U << 2,
136
137 PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
134}; 138};
135 139
140#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
141
136/* 142/*
137 * Hardware event to monitor via a performance monitoring counter: 143 * Hardware event to monitor via a performance monitoring counter:
138 */ 144 */
139struct perf_counter_attr { 145struct perf_counter_attr {
146
140 /* 147 /*
141 * Major type: hardware/software/tracepoint/etc. 148 * Major type: hardware/software/tracepoint/etc.
142 */ 149 */
143 __u32 type; 150 __u32 type;
144 __u32 __reserved_1; 151
152 /*
153 * Size of the attr structure, for fwd/bwd compat.
154 */
155 __u32 size;
145 156
146 /* 157 /*
147 * Type specific configuration information. 158 * Type specific configuration information.
@@ -168,12 +179,12 @@ struct perf_counter_attr {
168 comm : 1, /* include comm data */ 179 comm : 1, /* include comm data */
169 freq : 1, /* use freq, not period */ 180 freq : 1, /* use freq, not period */
170 181
171 __reserved_2 : 53; 182 __reserved_1 : 53;
172 183
173 __u32 wakeup_events; /* wakeup every n events */ 184 __u32 wakeup_events; /* wakeup every n events */
174 __u32 __reserved_3; 185 __u32 __reserved_2;
175 186
176 __u64 __reserved_4; 187 __u64 __reserved_3;
177}; 188};
178 189
179/* 190/*
@@ -621,7 +632,8 @@ extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
621static inline int is_software_counter(struct perf_counter *counter) 632static inline int is_software_counter(struct perf_counter *counter)
622{ 633{
623 return (counter->attr.type != PERF_TYPE_RAW) && 634 return (counter->attr.type != PERF_TYPE_RAW) &&
624 (counter->attr.type != PERF_TYPE_HARDWARE); 635 (counter->attr.type != PERF_TYPE_HARDWARE) &&
636 (counter->attr.type != PERF_TYPE_HW_CACHE);
625} 637}
626 638
627extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); 639extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1d4e2d289821..b3f74764a586 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -382,14 +382,13 @@ struct dev_pm_info {
382#ifdef CONFIG_PM_SLEEP 382#ifdef CONFIG_PM_SLEEP
383extern void device_pm_lock(void); 383extern void device_pm_lock(void);
384extern int sysdev_resume(void); 384extern int sysdev_resume(void);
385extern void device_power_up(pm_message_t state); 385extern void dpm_resume_noirq(pm_message_t state);
386extern void device_resume(pm_message_t state); 386extern void dpm_resume_end(pm_message_t state);
387 387
388extern void device_pm_unlock(void); 388extern void device_pm_unlock(void);
389extern int sysdev_suspend(pm_message_t state); 389extern int sysdev_suspend(pm_message_t state);
390extern int device_power_down(pm_message_t state); 390extern int dpm_suspend_noirq(pm_message_t state);
391extern int device_suspend(pm_message_t state); 391extern int dpm_suspend_start(pm_message_t state);
392extern int device_prepare_suspend(pm_message_t state);
393 392
394extern void __suspend_report_result(const char *function, void *fn, int ret); 393extern void __suspend_report_result(const char *function, void *fn, int ret);
395 394
@@ -403,7 +402,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
403#define device_pm_lock() do {} while (0) 402#define device_pm_lock() do {} while (0)
404#define device_pm_unlock() do {} while (0) 403#define device_pm_unlock() do {} while (0)
405 404
406static inline int device_suspend(pm_message_t state) 405static inline int dpm_suspend_start(pm_message_t state)
407{ 406{
408 return 0; 407 return 0;
409} 408}
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index ca3c88773028..b063c7328ba5 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -446,6 +446,7 @@ int pnp_start_dev(struct pnp_dev *dev);
446int pnp_stop_dev(struct pnp_dev *dev); 446int pnp_stop_dev(struct pnp_dev *dev);
447int pnp_activate_dev(struct pnp_dev *dev); 447int pnp_activate_dev(struct pnp_dev *dev);
448int pnp_disable_dev(struct pnp_dev *dev); 448int pnp_disable_dev(struct pnp_dev *dev);
449int pnp_range_reserved(resource_size_t start, resource_size_t end);
449 450
450/* protocol helpers */ 451/* protocol helpers */
451int pnp_is_active(struct pnp_dev *dev); 452int pnp_is_active(struct pnp_dev *dev);
@@ -476,6 +477,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
476static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } 477static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
477static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } 478static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
478static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } 479static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
480static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
479 481
480/* protocol helpers */ 482/* protocol helpers */
481static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } 483static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 48803064cedf..219b8fb4651d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -319,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
319 return kmalloc_node(size, flags | __GFP_ZERO, node); 319 return kmalloc_node(size, flags | __GFP_ZERO, node);
320} 320}
321 321
322void __init kmem_cache_init_late(void);
323
322#endif /* _LINUX_SLAB_H */ 324#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..bb5368df4be8 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
34 return kmalloc(size, flags); 34 return kmalloc(size, flags);
35} 35}
36 36
37static inline void kmem_cache_init_late(void)
38{
39 /* Nothing to do */
40}
41
37#endif /* __LINUX_SLOB_DEF_H */ 42#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index be5d40c43bd2..4dcbc2c71491 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
302} 302}
303#endif 303#endif
304 304
305void __init kmem_cache_init_late(void);
306
305#endif /* _LINUX_SLUB_DEF_H */ 307#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 795032edfc46..cd15df6c63cd 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,11 +245,6 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
245 245
246extern void hibernation_set_ops(struct platform_hibernation_ops *ops); 246extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
247extern int hibernate(void); 247extern int hibernate(void);
248extern int hibernate_nvs_register(unsigned long start, unsigned long size);
249extern int hibernate_nvs_alloc(void);
250extern void hibernate_nvs_free(void);
251extern void hibernate_nvs_save(void);
252extern void hibernate_nvs_restore(void);
253extern bool system_entering_hibernation(void); 248extern bool system_entering_hibernation(void);
254#else /* CONFIG_HIBERNATION */ 249#else /* CONFIG_HIBERNATION */
255static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 250static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
@@ -258,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {}
258 253
259static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} 254static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
260static inline int hibernate(void) { return -ENOSYS; } 255static inline int hibernate(void) { return -ENOSYS; }
256static inline bool system_entering_hibernation(void) { return false; }
257#endif /* CONFIG_HIBERNATION */
258
259#ifdef CONFIG_HIBERNATION_NVS
260extern int hibernate_nvs_register(unsigned long start, unsigned long size);
261extern int hibernate_nvs_alloc(void);
262extern void hibernate_nvs_free(void);
263extern void hibernate_nvs_save(void);
264extern void hibernate_nvs_restore(void);
265#else /* CONFIG_HIBERNATION_NVS */
261static inline int hibernate_nvs_register(unsigned long a, unsigned long b) 266static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
262{ 267{
263 return 0; 268 return 0;
@@ -266,8 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; }
266static inline void hibernate_nvs_free(void) {} 271static inline void hibernate_nvs_free(void) {}
267static inline void hibernate_nvs_save(void) {} 272static inline void hibernate_nvs_save(void) {}
268static inline void hibernate_nvs_restore(void) {} 273static inline void hibernate_nvs_restore(void) {}
269static inline bool system_entering_hibernation(void) { return false; } 274#endif /* CONFIG_HIBERNATION_NVS */
270#endif /* CONFIG_HIBERNATION */
271 275
272#ifdef CONFIG_PM_SLEEP 276#ifdef CONFIG_PM_SLEEP
273void save_processor_state(void); 277void save_processor_state(void);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c6c84ad8bd71..418d90f5effe 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -758,6 +758,6 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
758 758
759 759
760asmlinkage long sys_perf_counter_open( 760asmlinkage long sys_perf_counter_open(
761 const struct perf_counter_attr __user *attr_uptr, 761 struct perf_counter_attr __user *attr_uptr,
762 pid_t pid, int cpu, int group_fd, unsigned long flags); 762 pid_t pid, int cpu, int group_fd, unsigned long flags);
763#endif 763#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 06005fa9e982..4fca4f5440ba 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,14 +10,17 @@
10 10
11/** 11/**
12 * virtqueue - a queue to register buffers for sending or receiving. 12 * virtqueue - a queue to register buffers for sending or receiving.
13 * @list: the chain of virtqueues for this device
13 * @callback: the function to call when buffers are consumed (can be NULL). 14 * @callback: the function to call when buffers are consumed (can be NULL).
15 * @name: the name of this virtqueue (mainly for debugging)
14 * @vdev: the virtio device this queue was created for. 16 * @vdev: the virtio device this queue was created for.
15 * @vq_ops: the operations for this virtqueue (see below). 17 * @vq_ops: the operations for this virtqueue (see below).
16 * @priv: a pointer for the virtqueue implementation to use. 18 * @priv: a pointer for the virtqueue implementation to use.
17 */ 19 */
18struct virtqueue 20struct virtqueue {
19{ 21 struct list_head list;
20 void (*callback)(struct virtqueue *vq); 22 void (*callback)(struct virtqueue *vq);
23 const char *name;
21 struct virtio_device *vdev; 24 struct virtio_device *vdev;
22 struct virtqueue_ops *vq_ops; 25 struct virtqueue_ops *vq_ops;
23 void *priv; 26 void *priv;
@@ -76,15 +79,16 @@ struct virtqueue_ops {
76 * @dev: underlying device. 79 * @dev: underlying device.
77 * @id: the device type identification (used to match it with a driver). 80 * @id: the device type identification (used to match it with a driver).
78 * @config: the configuration ops for this device. 81 * @config: the configuration ops for this device.
82 * @vqs: the list of virtqueues for this device.
79 * @features: the features supported by both driver and device. 83 * @features: the features supported by both driver and device.
80 * @priv: private pointer for the driver's use. 84 * @priv: private pointer for the driver's use.
81 */ 85 */
82struct virtio_device 86struct virtio_device {
83{
84 int index; 87 int index;
85 struct device dev; 88 struct device dev;
86 struct virtio_device_id id; 89 struct virtio_device_id id;
87 struct virtio_config_ops *config; 90 struct virtio_config_ops *config;
91 struct list_head vqs;
88 /* Note that this is a Linux set_bit-style bitmap. */ 92 /* Note that this is a Linux set_bit-style bitmap. */
89 unsigned long features[1]; 93 unsigned long features[1];
90 void *priv; 94 void *priv;
@@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev);
99 * @id_table: the ids serviced by this driver. 103 * @id_table: the ids serviced by this driver.
100 * @feature_table: an array of feature numbers supported by this device. 104 * @feature_table: an array of feature numbers supported by this device.
101 * @feature_table_size: number of entries in the feature table array. 105 * @feature_table_size: number of entries in the feature table array.
102 * @probe: the function to call when a device is found. Returns a token for 106 * @probe: the function to call when a device is found. Returns 0 or -errno.
103 * remove, or PTR_ERR().
104 * @remove: the function when a device is removed. 107 * @remove: the function when a device is removed.
105 * @config_changed: optional function to call when the device configuration 108 * @config_changed: optional function to call when the device configuration
106 * changes; may be called in interrupt context. 109 * changes; may be called in interrupt context.
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index bf8ec283b232..99f514575f6a 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -29,6 +29,7 @@
29#define VIRTIO_F_NOTIFY_ON_EMPTY 24 29#define VIRTIO_F_NOTIFY_ON_EMPTY 24
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32#include <linux/err.h>
32#include <linux/virtio.h> 33#include <linux/virtio.h>
33 34
34/** 35/**
@@ -49,15 +50,26 @@
49 * @set_status: write the status byte 50 * @set_status: write the status byte
50 * vdev: the virtio_device 51 * vdev: the virtio_device
51 * status: the new status byte 52 * status: the new status byte
53 * @request_vqs: request the specified number of virtqueues
54 * vdev: the virtio_device
55 * max_vqs: the max number of virtqueues we want
56 * If supplied, must call before any virtqueues are instantiated.
57 * To modify the max number of virtqueues after request_vqs has been
58 * called, call free_vqs and then request_vqs with a new value.
59 * @free_vqs: cleanup resources allocated by request_vqs
60 * vdev: the virtio_device
61 * If supplied, must call after all virtqueues have been deleted.
52 * @reset: reset the device 62 * @reset: reset the device
53 * vdev: the virtio device 63 * vdev: the virtio device
54 * After this, status and feature negotiation must be done again 64 * After this, status and feature negotiation must be done again
55 * @find_vq: find a virtqueue and instantiate it. 65 * @find_vqs: find virtqueues and instantiate them.
56 * vdev: the virtio_device 66 * vdev: the virtio_device
57 * index: the 0-based virtqueue number in case there's more than one. 67 * nvqs: the number of virtqueues to find
58 * callback: the virqtueue callback 68 * vqs: on success, includes new virtqueues
59 * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). 69 * callbacks: array of callbacks, for each virtqueue
60 * @del_vq: free a virtqueue found by find_vq(). 70 * names: array of virtqueue names (mainly for debugging)
71 * Returns 0 on success or error status
72 * @del_vqs: free virtqueues found by find_vqs().
61 * @get_features: get the array of feature bits for this device. 73 * @get_features: get the array of feature bits for this device.
62 * vdev: the virtio_device 74 * vdev: the virtio_device
63 * Returns the first 32 feature bits (all we currently need). 75 * Returns the first 32 feature bits (all we currently need).
@@ -66,6 +78,7 @@
66 * This gives the final feature bits for the device: it can change 78 * This gives the final feature bits for the device: it can change
67 * the dev->feature bits if it wants. 79 * the dev->feature bits if it wants.
68 */ 80 */
81typedef void vq_callback_t(struct virtqueue *);
69struct virtio_config_ops 82struct virtio_config_ops
70{ 83{
71 void (*get)(struct virtio_device *vdev, unsigned offset, 84 void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -75,10 +88,11 @@ struct virtio_config_ops
75 u8 (*get_status)(struct virtio_device *vdev); 88 u8 (*get_status)(struct virtio_device *vdev);
76 void (*set_status)(struct virtio_device *vdev, u8 status); 89 void (*set_status)(struct virtio_device *vdev, u8 status);
77 void (*reset)(struct virtio_device *vdev); 90 void (*reset)(struct virtio_device *vdev);
78 struct virtqueue *(*find_vq)(struct virtio_device *vdev, 91 int (*find_vqs)(struct virtio_device *, unsigned nvqs,
79 unsigned index, 92 struct virtqueue *vqs[],
80 void (*callback)(struct virtqueue *)); 93 vq_callback_t *callbacks[],
81 void (*del_vq)(struct virtqueue *vq); 94 const char *names[]);
95 void (*del_vqs)(struct virtio_device *);
82 u32 (*get_features)(struct virtio_device *vdev); 96 u32 (*get_features)(struct virtio_device *vdev);
83 void (*finalize_features)(struct virtio_device *vdev); 97 void (*finalize_features)(struct virtio_device *vdev);
84}; 98};
@@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
99 if (__builtin_constant_p(fbit)) 113 if (__builtin_constant_p(fbit))
100 BUILD_BUG_ON(fbit >= 32); 114 BUILD_BUG_ON(fbit >= 32);
101 115
102 virtio_check_driver_offered_feature(vdev, fbit); 116 if (fbit < VIRTIO_TRANSPORT_F_START)
117 virtio_check_driver_offered_feature(vdev, fbit);
118
103 return test_bit(fbit, vdev->features); 119 return test_bit(fbit, vdev->features);
104} 120}
105 121
@@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev,
126 vdev->config->get(vdev, offset, buf, len); 142 vdev->config->get(vdev, offset, buf, len);
127 return 0; 143 return 0;
128} 144}
145
146static inline
147struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
148 vq_callback_t *c, const char *n)
149{
150 vq_callback_t *callbacks[] = { c };
151 const char *names[] = { n };
152 struct virtqueue *vq;
153 int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
154 if (err < 0)
155 return ERR_PTR(err);
156 return vq;
157}
129#endif /* __KERNEL__ */ 158#endif /* __KERNEL__ */
130#endif /* _LINUX_VIRTIO_CONFIG_H */ 159#endif /* _LINUX_VIRTIO_CONFIG_H */
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cd0fd5d181a6..9a3d7c48c622 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -47,9 +47,17 @@
47/* The bit of the ISR which indicates a device configuration change. */ 47/* The bit of the ISR which indicates a device configuration change. */
48#define VIRTIO_PCI_ISR_CONFIG 0x2 48#define VIRTIO_PCI_ISR_CONFIG 0x2
49 49
50/* MSI-X registers: only enabled if MSI-X is enabled. */
51/* A 16-bit vector for configuration changes. */
52#define VIRTIO_MSI_CONFIG_VECTOR 20
53/* A 16-bit vector for selected queue notifications. */
54#define VIRTIO_MSI_QUEUE_VECTOR 22
55/* Vector value used to disable MSI for queue */
56#define VIRTIO_MSI_NO_VECTOR 0xffff
57
50/* The remaining space is defined by each driver as the per-driver 58/* The remaining space is defined by each driver as the per-driver
51 * configuration space */ 59 * configuration space */
52#define VIRTIO_PCI_CONFIG 20 60#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
53 61
54/* Virtio ABI version, this must match exactly */ 62/* Virtio ABI version, this must match exactly */
55#define VIRTIO_PCI_ABI_VERSION 0 63#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 71e03722fb59..693e0ec5afa6 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -14,6 +14,8 @@
14#define VRING_DESC_F_NEXT 1 14#define VRING_DESC_F_NEXT 1
15/* This marks a buffer as write-only (otherwise read-only). */ 15/* This marks a buffer as write-only (otherwise read-only). */
16#define VRING_DESC_F_WRITE 2 16#define VRING_DESC_F_WRITE 2
17/* This means the buffer contains a list of buffer descriptors. */
18#define VRING_DESC_F_INDIRECT 4
17 19
18/* The Host uses this in used->flags to advise the Guest: don't kick me when 20/* The Host uses this in used->flags to advise the Guest: don't kick me when
19 * you add a buffer. It's unreliable, so it's simply an optimization. Guest 21 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
@@ -24,6 +26,9 @@
24 * optimization. */ 26 * optimization. */
25#define VRING_AVAIL_F_NO_INTERRUPT 1 27#define VRING_AVAIL_F_NO_INTERRUPT 1
26 28
29/* We support indirect buffer descriptors */
30#define VIRTIO_RING_F_INDIRECT_DESC 28
31
27/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ 32/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
28struct vring_desc 33struct vring_desc
29{ 34{
@@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
119 struct virtio_device *vdev, 124 struct virtio_device *vdev,
120 void *pages, 125 void *pages,
121 void (*notify)(struct virtqueue *vq), 126 void (*notify)(struct virtqueue *vq),
122 void (*callback)(struct virtqueue *vq)); 127 void (*callback)(struct virtqueue *vq),
128 const char *name);
123void vring_del_virtqueue(struct virtqueue *vq); 129void vring_del_virtqueue(struct virtqueue *vq);
124/* Filter out transport-specific feature bits. */ 130/* Filter out transport-specific feature bits. */
125void vring_transport_features(struct virtio_device *vdev); 131void vring_transport_features(struct virtio_device *vdev);
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
index 0627a9ae6347..3d138c1fcf8a 100644
--- a/include/scsi/fc/fc_fip.h
+++ b/include/scsi/fc/fc_fip.h
@@ -22,13 +22,6 @@
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf 22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
23 */ 23 */
24 24
25/*
26 * The FIP ethertype eventually goes in net/if_ether.h.
27 */
28#ifndef ETH_P_FIP
29#define ETH_P_FIP 0x8914 /* FIP Ethertype */
30#endif
31
32#define FIP_DEF_PRI 128 /* default selection priority */ 25#define FIP_DEF_PRI 128 /* default selection priority */
33#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ 26#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
34#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ 27#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index d0ed5226f8c4..4426f00da5ff 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -22,6 +22,11 @@
22#define ISCSI_IF_H 22#define ISCSI_IF_H
23 23
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <linux/in.h>
26#include <linux/in6.h>
27
28#define ISCSI_NL_GRP_ISCSID 1
29#define ISCSI_NL_GRP_UIP 2
25 30
26#define UEVENT_BASE 10 31#define UEVENT_BASE 10
27#define KEVENT_BASE 100 32#define KEVENT_BASE 100
@@ -50,7 +55,10 @@ enum iscsi_uevent_e {
50 ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, 55 ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
51 ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, 56 ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
52 ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, 57 ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
53 ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, 58 ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
59 ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST = UEVENT_BASE + 19,
60
61 ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20,
54 62
55 /* up events */ 63 /* up events */
56 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, 64 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -59,6 +67,9 @@ enum iscsi_uevent_e {
59 ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, 67 ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
60 ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, 68 ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
61 ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, 69 ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
70
71 ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7,
72 ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8,
62}; 73};
63 74
64enum iscsi_tgt_dscvr { 75enum iscsi_tgt_dscvr {
@@ -131,6 +142,10 @@ struct iscsi_uevent {
131 struct msg_transport_connect { 142 struct msg_transport_connect {
132 uint32_t non_blocking; 143 uint32_t non_blocking;
133 } ep_connect; 144 } ep_connect;
145 struct msg_transport_connect_through_host {
146 uint32_t host_no;
147 uint32_t non_blocking;
148 } ep_connect_through_host;
134 struct msg_transport_poll { 149 struct msg_transport_poll {
135 uint64_t ep_handle; 150 uint64_t ep_handle;
136 uint32_t timeout_ms; 151 uint32_t timeout_ms;
@@ -154,6 +169,9 @@ struct iscsi_uevent {
154 uint32_t param; /* enum iscsi_host_param */ 169 uint32_t param; /* enum iscsi_host_param */
155 uint32_t len; 170 uint32_t len;
156 } set_host_param; 171 } set_host_param;
172 struct msg_set_path {
173 uint32_t host_no;
174 } set_path;
157 } u; 175 } u;
158 union { 176 union {
159 /* messages k -> u */ 177 /* messages k -> u */
@@ -187,10 +205,39 @@ struct iscsi_uevent {
187 struct msg_transport_connect_ret { 205 struct msg_transport_connect_ret {
188 uint64_t handle; 206 uint64_t handle;
189 } ep_connect_ret; 207 } ep_connect_ret;
208 struct msg_req_path {
209 uint32_t host_no;
210 } req_path;
211 struct msg_notify_if_down {
212 uint32_t host_no;
213 } notify_if_down;
190 } r; 214 } r;
191} __attribute__ ((aligned (sizeof(uint64_t)))); 215} __attribute__ ((aligned (sizeof(uint64_t))));
192 216
193/* 217/*
218 * To keep the struct iscsi_uevent size the same for userspace code
219 * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
220 * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
221 * struct iscsi_uevent in the NETLINK_ISCSI message.
222 */
223struct iscsi_path {
224 uint64_t handle;
225 uint8_t mac_addr[6];
226 uint8_t mac_addr_old[6];
227 uint32_t ip_addr_len; /* 4 or 16 */
228 union {
229 struct in_addr v4_addr;
230 struct in6_addr v6_addr;
231 } src;
232 union {
233 struct in_addr v4_addr;
234 struct in6_addr v6_addr;
235 } dst;
236 uint16_t vlan_id;
237 uint16_t pmtu;
238} __attribute__ ((aligned (sizeof(uint64_t))));
239
240/*
194 * Common error codes 241 * Common error codes
195 */ 242 */
196enum iscsi_err { 243enum iscsi_err {
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 45f9cc642c46..ebdd9f4cf070 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -679,6 +679,7 @@ struct fc_lport {
679 unsigned int e_d_tov; 679 unsigned int e_d_tov;
680 unsigned int r_a_tov; 680 unsigned int r_a_tov;
681 u8 max_retry_count; 681 u8 max_retry_count;
682 u8 max_rport_retry_count;
682 u16 link_speed; 683 u16 link_speed;
683 u16 link_supported_speeds; 684 u16 link_supported_speeds;
684 u16 lro_xid; /* max xid for fcoe lro */ 685 u16 lro_xid; /* max xid for fcoe lro */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 0289f5745fb9..196525cd402f 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -82,9 +82,12 @@ enum {
82 82
83 83
84enum { 84enum {
85 ISCSI_TASK_FREE,
85 ISCSI_TASK_COMPLETED, 86 ISCSI_TASK_COMPLETED,
86 ISCSI_TASK_PENDING, 87 ISCSI_TASK_PENDING,
87 ISCSI_TASK_RUNNING, 88 ISCSI_TASK_RUNNING,
89 ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */
90 ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */
88}; 91};
89 92
90struct iscsi_r2t_info { 93struct iscsi_r2t_info {
@@ -181,9 +184,7 @@ struct iscsi_conn {
181 184
182 /* xmit */ 185 /* xmit */
183 struct list_head mgmtqueue; /* mgmt (control) xmit queue */ 186 struct list_head mgmtqueue; /* mgmt (control) xmit queue */
184 struct list_head mgmt_run_list; /* list of control tasks */ 187 struct list_head cmdqueue; /* data-path cmd queue */
185 struct list_head xmitqueue; /* data-path cmd queue */
186 struct list_head run_list; /* list of cmds in progress */
187 struct list_head requeue; /* tasks needing another run */ 188 struct list_head requeue; /* tasks needing another run */
188 struct work_struct xmitwork; /* per-conn. xmit workqueue */ 189 struct work_struct xmitwork; /* per-conn. xmit workqueue */
189 unsigned long suspend_tx; /* suspend Tx */ 190 unsigned long suspend_tx; /* suspend Tx */
@@ -406,6 +407,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
406 char *, int); 407 char *, int);
407extern int iscsi_verify_itt(struct iscsi_conn *, itt_t); 408extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
408extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t); 409extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
410extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
409extern void iscsi_requeue_task(struct iscsi_task *task); 411extern void iscsi_requeue_task(struct iscsi_task *task);
410extern void iscsi_put_task(struct iscsi_task *task); 412extern void iscsi_put_task(struct iscsi_task *task);
411extern void __iscsi_get_task(struct iscsi_task *task); 413extern void __iscsi_get_task(struct iscsi_task *task);
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
index f888a6fda073..56e920ade326 100644
--- a/include/scsi/osd_attributes.h
+++ b/include/scsi/osd_attributes.h
@@ -29,6 +29,7 @@ enum {
29 OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1, 29 OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
30 OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2, 30 OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2,
31 OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3, 31 OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3,
32 OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
32 OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5, 33 OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5,
33 OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF, 34 OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF,
34 35
@@ -51,7 +52,9 @@ enum {
51 OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF, 52 OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF,
52 53
53 OSD_APAGE_COMMON_FIRST = 0xF0000000, 54 OSD_APAGE_COMMON_FIRST = 0xF0000000,
54 OSD_APAGE_COMMON_LAST = 0xFFFFFFFE, 55 OSD_APAGE_COMMON_LAST = 0xFFFFFFFD,
56
57 OSD_APAGE_CURRENT_COMMAND = 0xFFFFFFFE,
55 58
56 OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF, 59 OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF,
57}; 60};
@@ -106,10 +109,30 @@ enum {
106 OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */ 109 OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */
107 OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */ 110 OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */
108 OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */ 111 OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */
112 OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA, /* 4 */
109 OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */ 113 OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */
110 OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */ 114 OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */
111 OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */ 115 OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */
112 OSD_ATTR_RI_CLOCK = 0x100, /* 6 */ 116 OSD_ATTR_RI_CLOCK = 0x100, /* 6 */
117 OARI_DEFAULT_ISOLATION_METHOD = 0X110, /* 1 */
118 OARI_SUPPORTED_ISOLATION_METHODS = 0X111, /* 32 */
119
120 OARI_DATA_ATOMICITY_GUARANTEE = 0X120, /* 8 */
121 OARI_DATA_ATOMICITY_ALIGNMENT = 0X121, /* 8 */
122 OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122, /* 8 */
123 OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */
124
125 OARI_MAXIMUM_SNAPSHOTS_COUNT = 0X1C1, /* 0 or 4 */
126 OARI_MAXIMUM_CLONES_COUNT = 0X1C2, /* 0 or 4 */
127 OARI_MAXIMUM_BRANCH_DEPTH = 0X1CC, /* 0 or 4 */
128 OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */
129 OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */
130 OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */
131 OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */
132 OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */
133 OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311, /* 0 or 1 */
134 OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */
135 OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST = 0X700FFFF,/* 0 or 4 */
113}; 136};
114/* Root_Information_attributes_page does not have a get_page structure */ 137/* Root_Information_attributes_page does not have a get_page structure */
115 138
@@ -120,7 +143,15 @@ enum {
120 OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */ 143 OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */
121 OSD_ATTR_PI_USERNAME = 0x9, /* variable */ 144 OSD_ATTR_PI_USERNAME = 0x9, /* variable */
122 OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */ 145 OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */
146 OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84, /* 0 or 8 */
123 OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */ 147 OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */
148
149 OSD_ATTR_PI_ACTUAL_DATA_SPACE = 0xD1, /* 0 or 8 */
150 OSD_ATTR_PI_RESERVED_DATA_SPACE = 0xD2, /* 0 or 8 */
151 OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200,/* 0 or 4 */
152 OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201,/* 0 or 4 */
153 OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300,/* 0 or 4 */
154 OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301,/* 0 or 4 */
124}; 155};
125/* Partition Information attributes page does not have a get_page structure */ 156/* Partition Information attributes page does not have a get_page structure */
126 157
@@ -131,6 +162,7 @@ enum {
131 OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */ 162 OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */
132 OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */ 163 OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */
133 OSD_ATTR_CI_USERNAME = 0x9, /* variable */ 164 OSD_ATTR_CI_USERNAME = 0x9, /* variable */
165 OSD_ATTR_CI_COLLECTION_TYPE = 0xA, /* 1 */
134 OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */ 166 OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */
135}; 167};
136/* Collection Information attributes page does not have a get_page structure */ 168/* Collection Information attributes page does not have a get_page structure */
@@ -144,6 +176,8 @@ enum {
144 OSD_ATTR_OI_USERNAME = 0x9, /* variable */ 176 OSD_ATTR_OI_USERNAME = 0x9, /* variable */
145 OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */ 177 OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */
146 OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */ 178 OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */
179 SD_ATTR_OI_ACTUAL_DATA_SPACE = 0XD1, /* 0 OR 8 */
180 SD_ATTR_OI_RESERVED_DATA_SPACE = 0XD2, /* 0 OR 8 */
147}; 181};
148/* Object Information attributes page does not have a get_page structure */ 182/* Object Information attributes page does not have a get_page structure */
149 183
@@ -248,7 +282,18 @@ struct object_timestamps_attributes_page {
248 struct osd_timestamp data_modified_time; 282 struct osd_timestamp data_modified_time;
249} __packed; 283} __packed;
250 284
251/* 7.1.2.19 Collections attributes page */ 285/* OSD2r05: 7.1.3.19 Attributes Access attributes page
286 * (OSD_APAGE_PARTITION_ATTR_ACCESS)
287 *
288 * each attribute is of the form below. Total array length is deduced
289 * from the attribute's length
290 * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
291 */
292struct attributes_access_attr {
293 struct osd_attributes_list_attrid attr_list[0];
294} __packed;
295
296/* OSD2r05: 7.1.2.21 Collections attributes page */
252/* TBD */ 297/* TBD */
253 298
254/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */ 299/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
@@ -324,4 +369,29 @@ struct object_security_attributes_page {
324 __be32 policy_access_tag; 369 __be32 policy_access_tag;
325} __packed; 370} __packed;
326 371
372/* OSD2r05: 7.1.3.31 Current Command attributes page
373 * (OSD_APAGE_CURRENT_COMMAND)
374 */
375enum {
376 OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */
377 OSD_ATTR_CC_OBJECT_TYPE = 0x2, /* 1 */
378 OSD_ATTR_CC_PARTITION_ID = 0x3, /* 8 */
379 OSD_ATTR_CC_OBJECT_ID = 0x4, /* 8 */
380 OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */
381 OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */
382};
383
384/*TBD: osdv1_current_command_attributes_page */
385
386struct osdv2_current_command_attributes_page {
387 struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */
388 u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
389 u8 object_type;
390 u8 reserved[3];
391 __be64 partition_id;
392 __be64 object_id;
393 __be64 starting_byte_address_of_append;
394 __be64 change_in_used_capacity;
395};
396
327#endif /*ndef __OSD_ATTRIBUTES_H__*/ 397#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index b24d9616eb46..02bd9f716357 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -18,6 +18,7 @@
18#include "osd_types.h" 18#include "osd_types.h"
19 19
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <scsi/scsi_device.h>
21 22
22/* Note: "NI" in comments below means "Not Implemented yet" */ 23/* Note: "NI" in comments below means "Not Implemented yet" */
23 24
@@ -47,6 +48,7 @@ enum osd_std_version {
47 */ 48 */
48struct osd_dev { 49struct osd_dev {
49 struct scsi_device *scsi_device; 50 struct scsi_device *scsi_device;
51 struct file *file;
50 unsigned def_timeout; 52 unsigned def_timeout;
51 53
52#ifdef OSD_VER1_SUPPORT 54#ifdef OSD_VER1_SUPPORT
@@ -69,6 +71,10 @@ void osd_dev_fini(struct osd_dev *od);
69 71
70/* some hi level device operations */ 72/* some hi level device operations */
71int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */ 73int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */
74static inline struct request_queue *osd_request_queue(struct osd_dev *od)
75{
76 return od->scsi_device->request_queue;
77}
72 78
73/* we might want to use function vector in the future */ 79/* we might want to use function vector in the future */
74static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v) 80static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
@@ -363,7 +369,9 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
363void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *); 369void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
364 370
365void osd_req_write(struct osd_request *or, 371void osd_req_write(struct osd_request *or,
366 const struct osd_obj_id *, struct bio *data_out, u64 offset); 372 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
373int osd_req_write_kern(struct osd_request *or,
374 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
367void osd_req_append(struct osd_request *or, 375void osd_req_append(struct osd_request *or,
368 const struct osd_obj_id *, struct bio *data_out);/* NI */ 376 const struct osd_obj_id *, struct bio *data_out);/* NI */
369void osd_req_create_write(struct osd_request *or, 377void osd_req_create_write(struct osd_request *or,
@@ -378,7 +386,9 @@ void osd_req_flush_object(struct osd_request *or,
378 /*V2*/ u64 offset, /*V2*/ u64 len); 386 /*V2*/ u64 offset, /*V2*/ u64 len);
379 387
380void osd_req_read(struct osd_request *or, 388void osd_req_read(struct osd_request *or,
381 const struct osd_obj_id *, struct bio *data_in, u64 offset); 389 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
390int osd_req_read_kern(struct osd_request *or,
391 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
382 392
383/* 393/*
384 * Root/Partition/Collection/Object Attributes commands 394 * Root/Partition/Collection/Object Attributes commands
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index 62b2ab8c69d4..2cc8e8b1cc19 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -303,7 +303,15 @@ enum osd_service_actions {
303 OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21) 303 OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21)
304 OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22) 304 OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22)
305 OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23) 305 OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23)
306
307 OSD_ACT_V2(CREATE_CLONE, 0x28)
308 OSD_ACT_V2(CREATE_SNAPSHOT, 0x29)
309 OSD_ACT_V2(DETACH_CLONE, 0x2A)
310 OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE, 0x2B)
311 OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
312
306 OSD_ACT_V2(READ_MAP, 0x31) 313 OSD_ACT_V2(READ_MAP, 0x31)
314 OSD_ACT_V2(READ_MAPS_COMPARE, 0x32)
307 315
308 OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C) 316 OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C)
309 OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D) 317 OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D)
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 457588e1119b..349c7f30720d 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -126,12 +126,14 @@ struct iscsi_transport {
126 int *index, int *age); 126 int *index, int *age);
127 127
128 void (*session_recovery_timedout) (struct iscsi_cls_session *session); 128 void (*session_recovery_timedout) (struct iscsi_cls_session *session);
129 struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr, 129 struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost,
130 struct sockaddr *dst_addr,
130 int non_blocking); 131 int non_blocking);
131 int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms); 132 int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
132 void (*ep_disconnect) (struct iscsi_endpoint *ep); 133 void (*ep_disconnect) (struct iscsi_endpoint *ep);
133 int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, 134 int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
134 uint32_t enable, struct sockaddr *dst_addr); 135 uint32_t enable, struct sockaddr *dst_addr);
136 int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
135}; 137};
136 138
137/* 139/*
@@ -148,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
148extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 150extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
149 char *data, uint32_t data_size); 151 char *data, uint32_t data_size);
150 152
153extern int iscsi_offload_mesg(struct Scsi_Host *shost,
154 struct iscsi_transport *transport, uint32_t type,
155 char *data, uint16_t data_size);
156
151struct iscsi_cls_conn { 157struct iscsi_cls_conn {
152 struct list_head conn_list; /* item in connlist */ 158 struct list_head conn_list; /* item in connlist */
153 void *dd_data; /* LLD private data */ 159 void *dd_data; /* LLD private data */
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 6add80fc2512..82aed3f47534 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -255,6 +255,7 @@ typedef int __bitwise snd_pcm_subformat_t;
255#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */ 255#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */
256#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */ 256#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
257#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */ 257#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
258#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
258 259
259typedef int __bitwise snd_pcm_state_t; 260typedef int __bitwise snd_pcm_state_t;
260#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */ 261#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
diff --git a/include/sound/core.h b/include/sound/core.h
index 3dea79829acc..309cb9659a05 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -300,19 +300,10 @@ int snd_card_create(int idx, const char *id,
300 struct module *module, int extra_size, 300 struct module *module, int extra_size,
301 struct snd_card **card_ret); 301 struct snd_card **card_ret);
302 302
303static inline __deprecated
304struct snd_card *snd_card_new(int idx, const char *id,
305 struct module *module, int extra_size)
306{
307 struct snd_card *card;
308 if (snd_card_create(idx, id, module, extra_size, &card) < 0)
309 return NULL;
310 return card;
311}
312
313int snd_card_disconnect(struct snd_card *card); 303int snd_card_disconnect(struct snd_card *card);
314int snd_card_free(struct snd_card *card); 304int snd_card_free(struct snd_card *card);
315int snd_card_free_when_closed(struct snd_card *card); 305int snd_card_free_when_closed(struct snd_card *card);
306void snd_card_set_id(struct snd_card *card, const char *id);
316int snd_card_register(struct snd_card *card); 307int snd_card_register(struct snd_card *card);
317int snd_card_info_init(void); 308int snd_card_info_init(void);
318int snd_card_info_done(void); 309int snd_card_info_done(void);
diff --git a/include/sound/driver.h b/include/sound/driver.h
deleted file mode 100644
index f0359437d01a..000000000000
--- a/include/sound/driver.h
+++ /dev/null
@@ -1 +0,0 @@
1#warning "This file is deprecated"
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c17296891617..23893523dc8c 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -98,6 +98,7 @@ struct snd_pcm_ops {
98#define SNDRV_PCM_IOCTL1_INFO 1 98#define SNDRV_PCM_IOCTL1_INFO 1
99#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2 99#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
100#define SNDRV_PCM_IOCTL1_GSTATE 3 100#define SNDRV_PCM_IOCTL1_GSTATE 3
101#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
101 102
102#define SNDRV_PCM_TRIGGER_STOP 0 103#define SNDRV_PCM_TRIGGER_STOP 0
103#define SNDRV_PCM_TRIGGER_START 1 104#define SNDRV_PCM_TRIGGER_START 1
@@ -270,6 +271,7 @@ struct snd_pcm_runtime {
270 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ 271 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
271 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */ 272 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */
272 unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */ 273 unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
274 snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */
273 275
274 /* -- HW params -- */ 276 /* -- HW params -- */
275 snd_pcm_access_t access; /* access mode */ 277 snd_pcm_access_t access; /* access mode */
@@ -486,80 +488,6 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
486void snd_pcm_vma_notify_data(void *client, void *data); 488void snd_pcm_vma_notify_data(void *client, void *data);
487int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); 489int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
488 490
489#if BITS_PER_LONG >= 64
490
491static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
492{
493 *rem = *n % div;
494 *n /= div;
495}
496
497#elif defined(i386)
498
499static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
500{
501 u_int32_t low, high;
502 low = *n & 0xffffffff;
503 high = *n >> 32;
504 if (high) {
505 u_int32_t high1 = high % div;
506 high /= div;
507 asm("divl %2":"=a" (low), "=d" (*rem):"rm" (div), "a" (low), "d" (high1));
508 *n = (u_int64_t)high << 32 | low;
509 } else {
510 *n = low / div;
511 *rem = low % div;
512 }
513}
514#else
515
516static inline void divl(u_int32_t high, u_int32_t low,
517 u_int32_t div,
518 u_int32_t *q, u_int32_t *r)
519{
520 u_int64_t n = (u_int64_t)high << 32 | low;
521 u_int64_t d = (u_int64_t)div << 31;
522 u_int32_t q1 = 0;
523 int c = 32;
524 while (n > 0xffffffffU) {
525 q1 <<= 1;
526 if (n >= d) {
527 n -= d;
528 q1 |= 1;
529 }
530 d >>= 1;
531 c--;
532 }
533 q1 <<= c;
534 if (n) {
535 low = n;
536 *q = q1 | (low / div);
537 *r = low % div;
538 } else {
539 *r = 0;
540 *q = q1;
541 }
542 return;
543}
544
545static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
546{
547 u_int32_t low, high;
548 low = *n & 0xffffffff;
549 high = *n >> 32;
550 if (high) {
551 u_int32_t high1 = high % div;
552 u_int32_t low1 = low;
553 high /= div;
554 divl(high1, low1, div, &low, rem);
555 *n = (u_int64_t)high << 32 | low;
556 } else {
557 *n = low / div;
558 *rem = low % div;
559 }
560}
561#endif
562
563/* 491/*
564 * PCM library 492 * PCM library
565 */ 493 */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 13676472ddfc..352d7eee9b6d 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -45,24 +45,6 @@ struct snd_pcm_substream;
45#define SND_SOC_DAIFMT_GATED (1 << 4) /* clock is gated */ 45#define SND_SOC_DAIFMT_GATED (1 << 4) /* clock is gated */
46 46
47/* 47/*
48 * DAI Left/Right Clocks.
49 *
50 * Specifies whether the DAI can support different samples for similtanious
51 * playback and capture. This usually requires a seperate physical frame
52 * clock for playback and capture.
53 */
54#define SND_SOC_DAIFMT_SYNC (0 << 5) /* Tx FRM = Rx FRM */
55#define SND_SOC_DAIFMT_ASYNC (1 << 5) /* Tx FRM ~ Rx FRM */
56
57/*
58 * TDM
59 *
60 * Time Division Multiplexing. Allows PCM data to be multplexed with other
61 * data on the DAI.
62 */
63#define SND_SOC_DAIFMT_TDM (1 << 6)
64
65/*
66 * DAI hardware signal inversions. 48 * DAI hardware signal inversions.
67 * 49 *
68 * Specifies whether the DAI can also support inverted clocks for the specified 50 * Specifies whether the DAI can also support inverted clocks for the specified
@@ -96,6 +78,10 @@ struct snd_pcm_substream;
96#define SND_SOC_CLOCK_IN 0 78#define SND_SOC_CLOCK_IN 0
97#define SND_SOC_CLOCK_OUT 1 79#define SND_SOC_CLOCK_OUT 1
98 80
81#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S16_LE |\
82 SNDRV_PCM_FMTBIT_S32_LE |\
83 SNDRV_PCM_FMTBIT_S32_BE)
84
99struct snd_soc_dai_ops; 85struct snd_soc_dai_ops;
100struct snd_soc_dai; 86struct snd_soc_dai;
101struct snd_ac97_bus_ops; 87struct snd_ac97_bus_ops;
@@ -208,6 +194,7 @@ struct snd_soc_dai {
208 /* DAI capabilities */ 194 /* DAI capabilities */
209 struct snd_soc_pcm_stream capture; 195 struct snd_soc_pcm_stream capture;
210 struct snd_soc_pcm_stream playback; 196 struct snd_soc_pcm_stream playback;
197 unsigned int symmetric_rates:1;
211 198
212 /* DAI runtime info */ 199 /* DAI runtime info */
213 struct snd_pcm_runtime *runtime; 200 struct snd_pcm_runtime *runtime;
@@ -219,11 +206,8 @@ struct snd_soc_dai {
219 /* DAI private data */ 206 /* DAI private data */
220 void *private_data; 207 void *private_data;
221 208
222 /* parent codec/platform */ 209 /* parent platform */
223 union { 210 struct snd_soc_platform *platform;
224 struct snd_soc_codec *codec;
225 struct snd_soc_platform *platform;
226 };
227 211
228 struct list_head list; 212 struct list_head list;
229}; 213};
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a7def6a9a030..ec8a45f9a069 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -140,16 +140,30 @@
140#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \ 140#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
141{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \ 141{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
142 .shift = wshift, .invert = winvert} 142 .shift = wshift, .invert = winvert}
143#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
144 wevent, wflags) \
145{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
146 .shift = wshift, .invert = winvert, \
147 .event = wevent, .event_flags = wflags}
143#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \ 148#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
144{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \ 149{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
145 .shift = wshift, .invert = winvert} 150 .shift = wshift, .invert = winvert}
151#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
152 wevent, wflags) \
153{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
154 .shift = wshift, .invert = winvert, \
155 .event = wevent, .event_flags = wflags}
146 156
147/* generic register modifier widget */ 157/* generic widgets */
148#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \ 158#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
149{ .id = wid, .name = wname, .kcontrols = NULL, .num_kcontrols = 0, \ 159{ .id = wid, .name = wname, .kcontrols = NULL, .num_kcontrols = 0, \
150 .reg = -((wreg) + 1), .shift = wshift, .mask = wmask, \ 160 .reg = -((wreg) + 1), .shift = wshift, .mask = wmask, \
151 .on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \ 161 .on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \
152 .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} 162 .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
163#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
164{ .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \
165 .shift = wshift, .invert = winvert, .event = wevent, \
166 .event_flags = wflags}
153 167
154/* dapm kcontrol types */ 168/* dapm kcontrol types */
155#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \ 169#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
@@ -265,8 +279,6 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
265/* dapm events */ 279/* dapm events */
266int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream, 280int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream,
267 int event); 281 int event);
268int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
269 enum snd_soc_bias_level level);
270 282
271/* dapm sys fs - used by the core */ 283/* dapm sys fs - used by the core */
272int snd_soc_dapm_sys_add(struct device *dev); 284int snd_soc_dapm_sys_add(struct device *dev);
@@ -298,6 +310,7 @@ enum snd_soc_dapm_type {
298 snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */ 310 snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */
299 snd_soc_dapm_pre, /* machine specific pre widget - exec first */ 311 snd_soc_dapm_pre, /* machine specific pre widget - exec first */
300 snd_soc_dapm_post, /* machine specific post widget - exec last */ 312 snd_soc_dapm_post, /* machine specific post widget - exec last */
313 snd_soc_dapm_supply, /* power/clock supply */
301}; 314};
302 315
303/* 316/*
@@ -357,6 +370,8 @@ struct snd_soc_dapm_widget {
357 unsigned char suspend:1; /* was active before suspend */ 370 unsigned char suspend:1; /* was active before suspend */
358 unsigned char pmdown:1; /* waiting for timeout */ 371 unsigned char pmdown:1; /* waiting for timeout */
359 372
373 int (*power_check)(struct snd_soc_dapm_widget *w);
374
360 /* external events */ 375 /* external events */
361 unsigned short event_flags; /* flags to specify event types */ 376 unsigned short event_flags; /* flags to specify event types */
362 int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int); 377 int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int);
@@ -368,6 +383,9 @@ struct snd_soc_dapm_widget {
368 /* widget input and outputs */ 383 /* widget input and outputs */
369 struct list_head sources; 384 struct list_head sources;
370 struct list_head sinks; 385 struct list_head sinks;
386
387 /* used during DAPM updates */
388 struct list_head power_list;
371}; 389};
372 390
373#endif 391#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index a40bc6f316fc..cf6111d72b17 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -118,6 +118,14 @@
118 .info = snd_soc_info_volsw, \ 118 .info = snd_soc_info_volsw, \
119 .get = xhandler_get, .put = xhandler_put, \ 119 .get = xhandler_get, .put = xhandler_put, \
120 .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) } 120 .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
121#define SOC_DOUBLE_EXT(xname, xreg, shift_left, shift_right, xmax, xinvert,\
122 xhandler_get, xhandler_put) \
123{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
124 .info = snd_soc_info_volsw, \
125 .get = xhandler_get, .put = xhandler_put, \
126 .private_value = (unsigned long)&(struct soc_mixer_control) \
127 {.reg = xreg, .shift = shift_left, .rshift = shift_right, \
128 .max = xmax, .invert = xinvert} }
121#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\ 129#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
122 xhandler_get, xhandler_put, tlv_array) \ 130 xhandler_get, xhandler_put, tlv_array) \
123{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 131{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -206,10 +214,6 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
206 struct snd_soc_jack_gpio *gpios); 214 struct snd_soc_jack_gpio *gpios);
207#endif 215#endif
208 216
209/* codec IO */
210#define snd_soc_read(codec, reg) codec->read(codec, reg)
211#define snd_soc_write(codec, reg, value) codec->write(codec, reg, value)
212
213/* codec register bit access */ 217/* codec register bit access */
214int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, 218int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
215 unsigned short mask, unsigned short value); 219 unsigned short mask, unsigned short value);
@@ -331,6 +335,7 @@ struct snd_soc_codec {
331 struct module *owner; 335 struct module *owner;
332 struct mutex mutex; 336 struct mutex mutex;
333 struct device *dev; 337 struct device *dev;
338 struct snd_soc_device *socdev;
334 339
335 struct list_head list; 340 struct list_head list;
336 341
@@ -364,6 +369,8 @@ struct snd_soc_codec {
364 enum snd_soc_bias_level bias_level; 369 enum snd_soc_bias_level bias_level;
365 enum snd_soc_bias_level suspend_bias_level; 370 enum snd_soc_bias_level suspend_bias_level;
366 struct delayed_work delayed_work; 371 struct delayed_work delayed_work;
372 struct list_head up_list;
373 struct list_head down_list;
367 374
368 /* codec DAI's */ 375 /* codec DAI's */
369 struct snd_soc_dai *dai; 376 struct snd_soc_dai *dai;
@@ -417,6 +424,12 @@ struct snd_soc_dai_link {
417 /* codec/machine specific init - e.g. add machine controls */ 424 /* codec/machine specific init - e.g. add machine controls */
418 int (*init)(struct snd_soc_codec *codec); 425 int (*init)(struct snd_soc_codec *codec);
419 426
427 /* Symmetry requirements */
428 unsigned int symmetric_rates:1;
429
430 /* Symmetry data - only valid if symmetry is being enforced */
431 unsigned int rate;
432
420 /* DAI pcm */ 433 /* DAI pcm */
421 struct snd_pcm *pcm; 434 struct snd_pcm *pcm;
422}; 435};
@@ -490,6 +503,19 @@ struct soc_enum {
490 void *dapm; 503 void *dapm;
491}; 504};
492 505
506/* codec IO */
507static inline unsigned int snd_soc_read(struct snd_soc_codec *codec,
508 unsigned int reg)
509{
510 return codec->read(codec, reg);
511}
512
513static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
514 unsigned int reg, unsigned int val)
515{
516 return codec->write(codec, reg, val);
517}
518
493#include <sound/soc-dai.h> 519#include <sound/soc-dai.h>
494 520
495#endif 521#endif
diff --git a/include/sound/wm9081.h b/include/sound/wm9081.h
new file mode 100644
index 000000000000..e173ddbf6bd4
--- /dev/null
+++ b/include/sound/wm9081.h
@@ -0,0 +1,25 @@
1/*
2 * linux/sound/wm9081.h -- Platform data for WM9081
3 *
4 * Copyright 2009 Wolfson Microelectronics. PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __LINUX_SND_WM_9081_H
12#define __LINUX_SND_WM_9081_H
13
14struct wm9081_retune_mobile_setting {
15 const char *name;
16 unsigned int rate;
17 u16 config[20];
18};
19
20struct wm9081_retune_mobile_config {
21 struct wm9081_retune_mobile_setting *configs;
22 int num_configs;
23};
24
25#endif
diff --git a/init/Kconfig b/init/Kconfig
index c649657e2259..d3a50967c337 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -936,6 +936,8 @@ config AIO
936 936
937config HAVE_PERF_COUNTERS 937config HAVE_PERF_COUNTERS
938 bool 938 bool
939 help
940 See tools/perf/design.txt for details.
939 941
940menu "Performance Counters" 942menu "Performance Counters"
941 943
diff --git a/init/main.c b/init/main.c
index 5616661eac01..f6204f712e7c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
539 */ 539 */
540static void __init mm_init(void) 540static void __init mm_init(void)
541{ 541{
542 /*
 543 * page_cgroup requires contiguous pages as memmap
544 * and it's bigger than MAX_ORDER unless SPARSEMEM.
545 */
546 page_cgroup_init_flatmem();
542 mem_init(); 547 mem_init();
543 kmem_cache_init(); 548 kmem_cache_init();
544 vmalloc_init(); 549 vmalloc_init();
@@ -635,6 +640,7 @@ asmlinkage void __init start_kernel(void)
635 "enabled early\n"); 640 "enabled early\n");
636 early_boot_irqs_on(); 641 early_boot_irqs_on();
637 local_irq_enable(); 642 local_irq_enable();
643 kmem_cache_init_late();
638 644
639 /* 645 /*
640 * HACK ALERT! This is early. We're enabling the console before 646 * HACK ALERT! This is early. We're enabling the console before
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 104578541230..065205bdd920 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -45,7 +45,7 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
45#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 45#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
46static void __init init_irq_default_affinity(void) 46static void __init init_irq_default_affinity(void)
47{ 47{
48 alloc_bootmem_cpumask_var(&irq_default_affinity); 48 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
49 cpumask_setall(irq_default_affinity); 49 cpumask_setall(irq_default_affinity);
50} 50}
51#else 51#else
diff --git a/kernel/kexec.c b/kernel/kexec.c
index e4983770913b..ae1c35201cc8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1448,17 +1448,17 @@ int kernel_kexec(void)
1448 goto Restore_console; 1448 goto Restore_console;
1449 } 1449 }
1450 suspend_console(); 1450 suspend_console();
1451 error = device_suspend(PMSG_FREEZE); 1451 error = dpm_suspend_start(PMSG_FREEZE);
1452 if (error) 1452 if (error)
1453 goto Resume_console; 1453 goto Resume_console;
1454 /* At this point, device_suspend() has been called, 1454 /* At this point, dpm_suspend_start() has been called,
1455 * but *not* device_power_down(). We *must* 1455 * but *not* dpm_suspend_noirq(). We *must* call
1456 * device_power_down() now. Otherwise, drivers for 1456 * dpm_suspend_noirq() now. Otherwise, drivers for
1457 * some devices (e.g. interrupt controllers) become 1457 * some devices (e.g. interrupt controllers) become
1458 * desynchronized with the actual state of the 1458 * desynchronized with the actual state of the
1459 * hardware at resume time, and evil weirdness ensues. 1459 * hardware at resume time, and evil weirdness ensues.
1460 */ 1460 */
1461 error = device_power_down(PMSG_FREEZE); 1461 error = dpm_suspend_noirq(PMSG_FREEZE);
1462 if (error) 1462 if (error)
1463 goto Resume_devices; 1463 goto Resume_devices;
1464 error = disable_nonboot_cpus(); 1464 error = disable_nonboot_cpus();
@@ -1486,9 +1486,9 @@ int kernel_kexec(void)
1486 local_irq_enable(); 1486 local_irq_enable();
1487 Enable_cpus: 1487 Enable_cpus:
1488 enable_nonboot_cpus(); 1488 enable_nonboot_cpus();
1489 device_power_up(PMSG_RESTORE); 1489 dpm_resume_noirq(PMSG_RESTORE);
1490 Resume_devices: 1490 Resume_devices:
1491 device_resume(PMSG_RESTORE); 1491 dpm_resume_end(PMSG_RESTORE);
1492 Resume_console: 1492 Resume_console:
1493 resume_console(); 1493 resume_console();
1494 thaw_processes(); 1494 thaw_processes();
diff --git a/kernel/module.c b/kernel/module.c
index 35f7de00bf0d..e4ab36ce7672 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2455,6 +2455,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
2455 mutex_lock(&module_mutex); 2455 mutex_lock(&module_mutex);
2456 /* Drop initial reference. */ 2456 /* Drop initial reference. */
2457 module_put(mod); 2457 module_put(mod);
2458 trim_init_extable(mod);
2458 module_free(mod, mod->module_init); 2459 module_free(mod, mod->module_init);
2459 mod->module_init = NULL; 2460 mod->module_init = NULL;
2460 mod->init_size = 0; 2461 mod->init_size = 0;
diff --git a/kernel/params.c b/kernel/params.c
index de273ec85bd2..7f6912ced2ba 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,9 +24,6 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
28#define KPARAM_KMALLOCED 0x80000000
29
30#if 0 27#if 0
31#define DEBUGP printk 28#define DEBUGP printk
32#else 29#else
@@ -220,13 +217,13 @@ int param_set_charp(const char *val, struct kernel_param *kp)
220 return -ENOSPC; 217 return -ENOSPC;
221 } 218 }
222 219
223 if (kp->perm & KPARAM_KMALLOCED) 220 if (kp->flags & KPARAM_KMALLOCED)
224 kfree(*(char **)kp->arg); 221 kfree(*(char **)kp->arg);
225 222
226 /* This is a hack. We can't need to strdup in early boot, and we 223 /* This is a hack. We can't need to strdup in early boot, and we
227 * don't need to; this mangled commandline is preserved. */ 224 * don't need to; this mangled commandline is preserved. */
228 if (slab_is_available()) { 225 if (slab_is_available()) {
229 kp->perm |= KPARAM_KMALLOCED; 226 kp->flags |= KPARAM_KMALLOCED;
230 *(char **)kp->arg = kstrdup(val, GFP_KERNEL); 227 *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
231 if (!kp->arg) 228 if (!kp->arg)
232 return -ENOMEM; 229 return -ENOMEM;
@@ -241,44 +238,63 @@ int param_get_charp(char *buffer, struct kernel_param *kp)
241 return sprintf(buffer, "%s", *((char **)kp->arg)); 238 return sprintf(buffer, "%s", *((char **)kp->arg));
242} 239}
243 240
241/* Actually could be a bool or an int, for historical reasons. */
244int param_set_bool(const char *val, struct kernel_param *kp) 242int param_set_bool(const char *val, struct kernel_param *kp)
245{ 243{
244 bool v;
245
246 /* No equals means "set"... */ 246 /* No equals means "set"... */
247 if (!val) val = "1"; 247 if (!val) val = "1";
248 248
249 /* One of =[yYnN01] */ 249 /* One of =[yYnN01] */
250 switch (val[0]) { 250 switch (val[0]) {
251 case 'y': case 'Y': case '1': 251 case 'y': case 'Y': case '1':
252 *(int *)kp->arg = 1; 252 v = true;
253 return 0; 253 break;
254 case 'n': case 'N': case '0': 254 case 'n': case 'N': case '0':
255 *(int *)kp->arg = 0; 255 v = false;
256 return 0; 256 break;
257 default:
258 return -EINVAL;
257 } 259 }
258 return -EINVAL; 260
261 if (kp->flags & KPARAM_ISBOOL)
262 *(bool *)kp->arg = v;
263 else
264 *(int *)kp->arg = v;
265 return 0;
259} 266}
260 267
261int param_get_bool(char *buffer, struct kernel_param *kp) 268int param_get_bool(char *buffer, struct kernel_param *kp)
262{ 269{
270 bool val;
271 if (kp->flags & KPARAM_ISBOOL)
272 val = *(bool *)kp->arg;
273 else
274 val = *(int *)kp->arg;
275
263 /* Y and N chosen as being relatively non-coder friendly */ 276 /* Y and N chosen as being relatively non-coder friendly */
264 return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'Y' : 'N'); 277 return sprintf(buffer, "%c", val ? 'Y' : 'N');
265} 278}
266 279
280/* This one must be bool. */
267int param_set_invbool(const char *val, struct kernel_param *kp) 281int param_set_invbool(const char *val, struct kernel_param *kp)
268{ 282{
269 int boolval, ret; 283 int ret;
284 bool boolval;
270 struct kernel_param dummy; 285 struct kernel_param dummy;
271 286
272 dummy.arg = &boolval; 287 dummy.arg = &boolval;
288 dummy.flags = KPARAM_ISBOOL;
273 ret = param_set_bool(val, &dummy); 289 ret = param_set_bool(val, &dummy);
274 if (ret == 0) 290 if (ret == 0)
275 *(int *)kp->arg = !boolval; 291 *(bool *)kp->arg = !boolval;
276 return ret; 292 return ret;
277} 293}
278 294
279int param_get_invbool(char *buffer, struct kernel_param *kp) 295int param_get_invbool(char *buffer, struct kernel_param *kp)
280{ 296{
281 return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y'); 297 return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
282} 298}
283 299
284/* We break the rule and mangle the string. */ 300/* We break the rule and mangle the string. */
@@ -591,7 +607,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
591 unsigned int i; 607 unsigned int i;
592 608
593 for (i = 0; i < num; i++) 609 for (i = 0; i < num; i++)
594 if (params[i].perm & KPARAM_KMALLOCED) 610 if (params[i].flags & KPARAM_KMALLOCED)
595 kfree(*(char **)params[i].arg); 611 kfree(*(char **)params[i].arg);
596} 612}
597 613
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ef5d8a5b2453..29b685f551aa 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3570,12 +3570,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
3570 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) 3570 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3571 goto done; 3571 goto done;
3572 3572
3573 if (attr->type == PERF_TYPE_RAW) {
3574 pmu = hw_perf_counter_init(counter);
3575 goto done;
3576 }
3577
3578 switch (attr->type) { 3573 switch (attr->type) {
3574 case PERF_TYPE_RAW:
3579 case PERF_TYPE_HARDWARE: 3575 case PERF_TYPE_HARDWARE:
3580 case PERF_TYPE_HW_CACHE: 3576 case PERF_TYPE_HW_CACHE:
3581 pmu = hw_perf_counter_init(counter); 3577 pmu = hw_perf_counter_init(counter);
@@ -3588,6 +3584,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
3588 case PERF_TYPE_TRACEPOINT: 3584 case PERF_TYPE_TRACEPOINT:
3589 pmu = tp_perf_counter_init(counter); 3585 pmu = tp_perf_counter_init(counter);
3590 break; 3586 break;
3587
3588 default:
3589 break;
3591 } 3590 }
3592done: 3591done:
3593 err = 0; 3592 err = 0;
@@ -3614,6 +3613,85 @@ done:
3614 return counter; 3613 return counter;
3615} 3614}
3616 3615
3616static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3617 struct perf_counter_attr *attr)
3618{
3619 int ret;
3620 u32 size;
3621
3622 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3623 return -EFAULT;
3624
3625 /*
3626 * zero the full structure, so that a short copy will be nice.
3627 */
3628 memset(attr, 0, sizeof(*attr));
3629
3630 ret = get_user(size, &uattr->size);
3631 if (ret)
3632 return ret;
3633
3634 if (size > PAGE_SIZE) /* silly large */
3635 goto err_size;
3636
3637 if (!size) /* abi compat */
3638 size = PERF_ATTR_SIZE_VER0;
3639
3640 if (size < PERF_ATTR_SIZE_VER0)
3641 goto err_size;
3642
3643 /*
3644 * If we're handed a bigger struct than we know of,
3645 * ensure all the unknown bits are 0.
3646 */
3647 if (size > sizeof(*attr)) {
3648 unsigned long val;
3649 unsigned long __user *addr;
3650 unsigned long __user *end;
3651
3652 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3653 sizeof(unsigned long));
3654 end = PTR_ALIGN((void __user *)uattr + size,
3655 sizeof(unsigned long));
3656
3657 for (; addr < end; addr += sizeof(unsigned long)) {
3658 ret = get_user(val, addr);
3659 if (ret)
3660 return ret;
3661 if (val)
3662 goto err_size;
3663 }
3664 }
3665
3666 ret = copy_from_user(attr, uattr, size);
3667 if (ret)
3668 return -EFAULT;
3669
3670 /*
3671 * If the type exists, the corresponding creation will verify
3672 * the attr->config.
3673 */
3674 if (attr->type >= PERF_TYPE_MAX)
3675 return -EINVAL;
3676
3677 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3678 return -EINVAL;
3679
3680 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3681 return -EINVAL;
3682
3683 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3684 return -EINVAL;
3685
3686out:
3687 return ret;
3688
3689err_size:
3690 put_user(sizeof(*attr), &uattr->size);
3691 ret = -E2BIG;
3692 goto out;
3693}
3694
3617/** 3695/**
3618 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 3696 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3619 * 3697 *
@@ -3623,7 +3701,7 @@ done:
3623 * @group_fd: group leader counter fd 3701 * @group_fd: group leader counter fd
3624 */ 3702 */
3625SYSCALL_DEFINE5(perf_counter_open, 3703SYSCALL_DEFINE5(perf_counter_open,
3626 const struct perf_counter_attr __user *, attr_uptr, 3704 struct perf_counter_attr __user *, attr_uptr,
3627 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 3705 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3628{ 3706{
3629 struct perf_counter *counter, *group_leader; 3707 struct perf_counter *counter, *group_leader;
@@ -3639,8 +3717,9 @@ SYSCALL_DEFINE5(perf_counter_open,
3639 if (flags) 3717 if (flags)
3640 return -EINVAL; 3718 return -EINVAL;
3641 3719
3642 if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0) 3720 ret = perf_copy_attr(attr_uptr, &attr);
3643 return -EFAULT; 3721 if (ret)
3722 return ret;
3644 3723
3645 if (!attr.exclude_kernel) { 3724 if (!attr.exclude_kernel) {
3646 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 3725 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 23bd4daeb96b..72067cbdb37f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -116,9 +116,13 @@ config SUSPEND_FREEZER
116 116
117 Turning OFF this setting is NOT recommended! If in doubt, say Y. 117 Turning OFF this setting is NOT recommended! If in doubt, say Y.
118 118
119config HIBERNATION_NVS
120 bool
121
119config HIBERNATION 122config HIBERNATION
120 bool "Hibernation (aka 'suspend to disk')" 123 bool "Hibernation (aka 'suspend to disk')"
121 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE 124 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
125 select HIBERNATION_NVS if HAS_IOMEM
122 ---help--- 126 ---help---
123 Enable the suspend to disk (STD) functionality, which is usually 127 Enable the suspend to disk (STD) functionality, which is usually
124 called "hibernation" in user interfaces. STD checkpoints the 128 called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 720ea4f781bd..c3b81c30e5d5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -6,6 +6,9 @@ endif
6obj-$(CONFIG_PM) += main.o 6obj-$(CONFIG_PM) += main.o
7obj-$(CONFIG_PM_SLEEP) += console.o 7obj-$(CONFIG_PM_SLEEP) += console.o
8obj-$(CONFIG_FREEZER) += process.o 8obj-$(CONFIG_FREEZER) += process.o
9obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o 9obj-$(CONFIG_SUSPEND) += suspend.o
10obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
11obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
12obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
10 13
11obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o 14obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/hibernate.c
index 5cb080e7eebd..81d2e7464893 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/hibernate.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * kernel/power/disk.c - Suspend-to-disk support. 2 * kernel/power/hibernate.c - Hibernation (a.k.a suspend-to-disk) support.
3 * 3 *
4 * Copyright (c) 2003 Patrick Mochel 4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab 5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> 6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz>
7 * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
7 * 8 *
8 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
9 *
10 */ 10 */
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
@@ -215,13 +215,13 @@ static int create_image(int platform_mode)
215 if (error) 215 if (error)
216 return error; 216 return error;
217 217
218 /* At this point, device_suspend() has been called, but *not* 218 /* At this point, dpm_suspend_start() has been called, but *not*
219 * device_power_down(). We *must* call device_power_down() now. 219 * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
220 * Otherwise, drivers for some devices (e.g. interrupt controllers) 220 * Otherwise, drivers for some devices (e.g. interrupt controllers)
221 * become desynchronized with the actual state of the hardware 221 * become desynchronized with the actual state of the hardware
222 * at resume time, and evil weirdness ensues. 222 * at resume time, and evil weirdness ensues.
223 */ 223 */
224 error = device_power_down(PMSG_FREEZE); 224 error = dpm_suspend_noirq(PMSG_FREEZE);
225 if (error) { 225 if (error) {
226 printk(KERN_ERR "PM: Some devices failed to power down, " 226 printk(KERN_ERR "PM: Some devices failed to power down, "
227 "aborting hibernation\n"); 227 "aborting hibernation\n");
@@ -262,7 +262,7 @@ static int create_image(int platform_mode)
262 262
263 Power_up: 263 Power_up:
264 sysdev_resume(); 264 sysdev_resume();
265 /* NOTE: device_power_up() is just a resume() for devices 265 /* NOTE: dpm_resume_noirq() is just a resume() for devices
266 * that suspended with irqs off ... no overall powerup. 266 * that suspended with irqs off ... no overall powerup.
267 */ 267 */
268 268
@@ -275,7 +275,7 @@ static int create_image(int platform_mode)
275 Platform_finish: 275 Platform_finish:
276 platform_finish(platform_mode); 276 platform_finish(platform_mode);
277 277
278 device_power_up(in_suspend ? 278 dpm_resume_noirq(in_suspend ?
279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
280 280
281 return error; 281 return error;
@@ -304,7 +304,7 @@ int hibernation_snapshot(int platform_mode)
304 goto Close; 304 goto Close;
305 305
306 suspend_console(); 306 suspend_console();
307 error = device_suspend(PMSG_FREEZE); 307 error = dpm_suspend_start(PMSG_FREEZE);
308 if (error) 308 if (error)
309 goto Recover_platform; 309 goto Recover_platform;
310 310
@@ -315,7 +315,7 @@ int hibernation_snapshot(int platform_mode)
315 /* Control returns here after successful restore */ 315 /* Control returns here after successful restore */
316 316
317 Resume_devices: 317 Resume_devices:
318 device_resume(in_suspend ? 318 dpm_resume_end(in_suspend ?
319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
320 resume_console(); 320 resume_console();
321 Close: 321 Close:
@@ -339,7 +339,7 @@ static int resume_target_kernel(bool platform_mode)
339{ 339{
340 int error; 340 int error;
341 341
342 error = device_power_down(PMSG_QUIESCE); 342 error = dpm_suspend_noirq(PMSG_QUIESCE);
343 if (error) { 343 if (error) {
344 printk(KERN_ERR "PM: Some devices failed to power down, " 344 printk(KERN_ERR "PM: Some devices failed to power down, "
345 "aborting resume\n"); 345 "aborting resume\n");
@@ -394,7 +394,7 @@ static int resume_target_kernel(bool platform_mode)
394 Cleanup: 394 Cleanup:
395 platform_restore_cleanup(platform_mode); 395 platform_restore_cleanup(platform_mode);
396 396
397 device_power_up(PMSG_RECOVER); 397 dpm_resume_noirq(PMSG_RECOVER);
398 398
399 return error; 399 return error;
400} 400}
@@ -414,10 +414,10 @@ int hibernation_restore(int platform_mode)
414 414
415 pm_prepare_console(); 415 pm_prepare_console();
416 suspend_console(); 416 suspend_console();
417 error = device_suspend(PMSG_QUIESCE); 417 error = dpm_suspend_start(PMSG_QUIESCE);
418 if (!error) { 418 if (!error) {
419 error = resume_target_kernel(platform_mode); 419 error = resume_target_kernel(platform_mode);
420 device_resume(PMSG_RECOVER); 420 dpm_resume_end(PMSG_RECOVER);
421 } 421 }
422 resume_console(); 422 resume_console();
423 pm_restore_console(); 423 pm_restore_console();
@@ -447,14 +447,14 @@ int hibernation_platform_enter(void)
447 447
448 entering_platform_hibernation = true; 448 entering_platform_hibernation = true;
449 suspend_console(); 449 suspend_console();
450 error = device_suspend(PMSG_HIBERNATE); 450 error = dpm_suspend_start(PMSG_HIBERNATE);
451 if (error) { 451 if (error) {
452 if (hibernation_ops->recover) 452 if (hibernation_ops->recover)
453 hibernation_ops->recover(); 453 hibernation_ops->recover();
454 goto Resume_devices; 454 goto Resume_devices;
455 } 455 }
456 456
457 error = device_power_down(PMSG_HIBERNATE); 457 error = dpm_suspend_noirq(PMSG_HIBERNATE);
458 if (error) 458 if (error)
459 goto Resume_devices; 459 goto Resume_devices;
460 460
@@ -479,11 +479,11 @@ int hibernation_platform_enter(void)
479 Platofrm_finish: 479 Platofrm_finish:
480 hibernation_ops->finish(); 480 hibernation_ops->finish();
481 481
482 device_power_up(PMSG_RESTORE); 482 dpm_suspend_noirq(PMSG_RESTORE);
483 483
484 Resume_devices: 484 Resume_devices:
485 entering_platform_hibernation = false; 485 entering_platform_hibernation = false;
486 device_resume(PMSG_RESTORE); 486 dpm_resume_end(PMSG_RESTORE);
487 resume_console(); 487 resume_console();
488 488
489 Close: 489 Close:
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
new file mode 100644
index 000000000000..39ac698ef836
--- /dev/null
+++ b/kernel/power/hibernate_nvs.c
@@ -0,0 +1,135 @@
1/*
2 * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
3 *
4 * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/mm.h>
13#include <linux/suspend.h>
14
15/*
16 * Platforms, like ACPI, may want us to save some memory used by them during
17 * hibernation and to restore the contents of this memory during the subsequent
18 * resume. The code below implements a mechanism allowing us to do that.
19 */
20
21struct nvs_page {
22 unsigned long phys_start;
23 unsigned int size;
24 void *kaddr;
25 void *data;
26 struct list_head node;
27};
28
29static LIST_HEAD(nvs_list);
30
31/**
32 * hibernate_nvs_register - register platform NVS memory region to save
33 * @start - physical address of the region
34 * @size - size of the region
35 *
36 * The NVS region need not be page-aligned (both ends) and we arrange
37 * things so that the data from page-aligned addresses in this region will
38 * be copied into separate RAM pages.
39 */
40int hibernate_nvs_register(unsigned long start, unsigned long size)
41{
42 struct nvs_page *entry, *next;
43
44 while (size > 0) {
45 unsigned int nr_bytes;
46
47 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
48 if (!entry)
49 goto Error;
50
51 list_add_tail(&entry->node, &nvs_list);
52 entry->phys_start = start;
53 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
54 entry->size = (size < nr_bytes) ? size : nr_bytes;
55
56 start += entry->size;
57 size -= entry->size;
58 }
59 return 0;
60
61 Error:
62 list_for_each_entry_safe(entry, next, &nvs_list, node) {
63 list_del(&entry->node);
64 kfree(entry);
65 }
66 return -ENOMEM;
67}
68
69/**
70 * hibernate_nvs_free - free data pages allocated for saving NVS regions
71 */
72void hibernate_nvs_free(void)
73{
74 struct nvs_page *entry;
75
76 list_for_each_entry(entry, &nvs_list, node)
77 if (entry->data) {
78 free_page((unsigned long)entry->data);
79 entry->data = NULL;
80 if (entry->kaddr) {
81 iounmap(entry->kaddr);
82 entry->kaddr = NULL;
83 }
84 }
85}
86
87/**
88 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
89 */
90int hibernate_nvs_alloc(void)
91{
92 struct nvs_page *entry;
93
94 list_for_each_entry(entry, &nvs_list, node) {
95 entry->data = (void *)__get_free_page(GFP_KERNEL);
96 if (!entry->data) {
97 hibernate_nvs_free();
98 return -ENOMEM;
99 }
100 }
101 return 0;
102}
103
104/**
105 * hibernate_nvs_save - save NVS memory regions
106 */
107void hibernate_nvs_save(void)
108{
109 struct nvs_page *entry;
110
111 printk(KERN_INFO "PM: Saving platform NVS memory\n");
112
113 list_for_each_entry(entry, &nvs_list, node)
114 if (entry->data) {
115 entry->kaddr = ioremap(entry->phys_start, entry->size);
116 memcpy(entry->data, entry->kaddr, entry->size);
117 }
118}
119
120/**
121 * hibernate_nvs_restore - restore NVS memory regions
122 *
123 * This function is going to be called with interrupts disabled, so it
124 * cannot iounmap the virtual addresses used to access the NVS region.
125 */
126void hibernate_nvs_restore(void)
127{
128 struct nvs_page *entry;
129
130 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
131
132 list_for_each_entry(entry, &nvs_list, node)
133 if (entry->data)
134 memcpy(entry->kaddr, entry->data, entry->size);
135}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 868028280d13..f710e36930cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,20 +8,9 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/module.h>
12#include <linux/suspend.h>
13#include <linux/kobject.h> 11#include <linux/kobject.h>
14#include <linux/string.h> 12#include <linux/string.h>
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/kmod.h>
18#include <linux/init.h>
19#include <linux/console.h>
20#include <linux/cpu.h>
21#include <linux/resume-trace.h> 13#include <linux/resume-trace.h>
22#include <linux/freezer.h>
23#include <linux/vmstat.h>
24#include <linux/syscalls.h>
25 14
26#include "power.h" 15#include "power.h"
27 16
@@ -119,373 +108,6 @@ power_attr(pm_test);
119 108
120#endif /* CONFIG_PM_SLEEP */ 109#endif /* CONFIG_PM_SLEEP */
121 110
122#ifdef CONFIG_SUSPEND
123
124static int suspend_test(int level)
125{
126#ifdef CONFIG_PM_DEBUG
127 if (pm_test_level == level) {
128 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
129 mdelay(5000);
130 return 1;
131 }
132#endif /* !CONFIG_PM_DEBUG */
133 return 0;
134}
135
136#ifdef CONFIG_PM_TEST_SUSPEND
137
138/*
139 * We test the system suspend code by setting an RTC wakealarm a short
140 * time in the future, then suspending. Suspending the devices won't
141 * normally take long ... some systems only need a few milliseconds.
142 *
143 * The time it takes is system-specific though, so when we test this
144 * during system bootup we allow a LOT of time.
145 */
146#define TEST_SUSPEND_SECONDS 5
147
148static unsigned long suspend_test_start_time;
149
150static void suspend_test_start(void)
151{
152 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
153 * What we want is a hardware counter that will work correctly even
154 * during the irqs-are-off stages of the suspend/resume cycle...
155 */
156 suspend_test_start_time = jiffies;
157}
158
159static void suspend_test_finish(const char *label)
160{
161 long nj = jiffies - suspend_test_start_time;
162 unsigned msec;
163
164 msec = jiffies_to_msecs(abs(nj));
165 pr_info("PM: %s took %d.%03d seconds\n", label,
166 msec / 1000, msec % 1000);
167
168 /* Warning on suspend means the RTC alarm period needs to be
169 * larger -- the system was sooo slooowwww to suspend that the
170 * alarm (should have) fired before the system went to sleep!
171 *
172 * Warning on either suspend or resume also means the system
173 * has some performance issues. The stack dump of a WARN_ON
174 * is more likely to get the right attention than a printk...
175 */
176 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
177}
178
179#else
180
181static void suspend_test_start(void)
182{
183}
184
185static void suspend_test_finish(const char *label)
186{
187}
188
189#endif
190
191/* This is just an arbitrary number */
192#define FREE_PAGE_NUMBER (100)
193
194static struct platform_suspend_ops *suspend_ops;
195
196/**
197 * suspend_set_ops - Set the global suspend method table.
198 * @ops: Pointer to ops structure.
199 */
200
201void suspend_set_ops(struct platform_suspend_ops *ops)
202{
203 mutex_lock(&pm_mutex);
204 suspend_ops = ops;
205 mutex_unlock(&pm_mutex);
206}
207
208/**
209 * suspend_valid_only_mem - generic memory-only valid callback
210 *
211 * Platform drivers that implement mem suspend only and only need
212 * to check for that in their .valid callback can use this instead
213 * of rolling their own .valid callback.
214 */
215int suspend_valid_only_mem(suspend_state_t state)
216{
217 return state == PM_SUSPEND_MEM;
218}
219
220/**
221 * suspend_prepare - Do prep work before entering low-power state.
222 *
223 * This is common code that is called for each state that we're entering.
224 * Run suspend notifiers, allocate a console and stop all processes.
225 */
226static int suspend_prepare(void)
227{
228 int error;
229 unsigned int free_pages;
230
231 if (!suspend_ops || !suspend_ops->enter)
232 return -EPERM;
233
234 pm_prepare_console();
235
236 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
237 if (error)
238 goto Finish;
239
240 error = usermodehelper_disable();
241 if (error)
242 goto Finish;
243
244 if (suspend_freeze_processes()) {
245 error = -EAGAIN;
246 goto Thaw;
247 }
248
249 free_pages = global_page_state(NR_FREE_PAGES);
250 if (free_pages < FREE_PAGE_NUMBER) {
251 pr_debug("PM: free some memory\n");
252 shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
253 if (nr_free_pages() < FREE_PAGE_NUMBER) {
254 error = -ENOMEM;
255 printk(KERN_ERR "PM: No enough memory\n");
256 }
257 }
258 if (!error)
259 return 0;
260
261 Thaw:
262 suspend_thaw_processes();
263 usermodehelper_enable();
264 Finish:
265 pm_notifier_call_chain(PM_POST_SUSPEND);
266 pm_restore_console();
267 return error;
268}
269
270/* default implementation */
271void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
272{
273 local_irq_disable();
274}
275
276/* default implementation */
277void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
278{
279 local_irq_enable();
280}
281
282/**
283 * suspend_enter - enter the desired system sleep state.
284 * @state: state to enter
285 *
286 * This function should be called after devices have been suspended.
287 */
288static int suspend_enter(suspend_state_t state)
289{
290 int error;
291
292 if (suspend_ops->prepare) {
293 error = suspend_ops->prepare();
294 if (error)
295 return error;
296 }
297
298 error = device_power_down(PMSG_SUSPEND);
299 if (error) {
300 printk(KERN_ERR "PM: Some devices failed to power down\n");
301 goto Platfrom_finish;
302 }
303
304 if (suspend_ops->prepare_late) {
305 error = suspend_ops->prepare_late();
306 if (error)
307 goto Power_up_devices;
308 }
309
310 if (suspend_test(TEST_PLATFORM))
311 goto Platform_wake;
312
313 error = disable_nonboot_cpus();
314 if (error || suspend_test(TEST_CPUS))
315 goto Enable_cpus;
316
317 arch_suspend_disable_irqs();
318 BUG_ON(!irqs_disabled());
319
320 error = sysdev_suspend(PMSG_SUSPEND);
321 if (!error) {
322 if (!suspend_test(TEST_CORE))
323 error = suspend_ops->enter(state);
324 sysdev_resume();
325 }
326
327 arch_suspend_enable_irqs();
328 BUG_ON(irqs_disabled());
329
330 Enable_cpus:
331 enable_nonboot_cpus();
332
333 Platform_wake:
334 if (suspend_ops->wake)
335 suspend_ops->wake();
336
337 Power_up_devices:
338 device_power_up(PMSG_RESUME);
339
340 Platfrom_finish:
341 if (suspend_ops->finish)
342 suspend_ops->finish();
343
344 return error;
345}
346
347/**
348 * suspend_devices_and_enter - suspend devices and enter the desired system
349 * sleep state.
350 * @state: state to enter
351 */
352int suspend_devices_and_enter(suspend_state_t state)
353{
354 int error;
355
356 if (!suspend_ops)
357 return -ENOSYS;
358
359 if (suspend_ops->begin) {
360 error = suspend_ops->begin(state);
361 if (error)
362 goto Close;
363 }
364 suspend_console();
365 suspend_test_start();
366 error = device_suspend(PMSG_SUSPEND);
367 if (error) {
368 printk(KERN_ERR "PM: Some devices failed to suspend\n");
369 goto Recover_platform;
370 }
371 suspend_test_finish("suspend devices");
372 if (suspend_test(TEST_DEVICES))
373 goto Recover_platform;
374
375 suspend_enter(state);
376
377 Resume_devices:
378 suspend_test_start();
379 device_resume(PMSG_RESUME);
380 suspend_test_finish("resume devices");
381 resume_console();
382 Close:
383 if (suspend_ops->end)
384 suspend_ops->end();
385 return error;
386
387 Recover_platform:
388 if (suspend_ops->recover)
389 suspend_ops->recover();
390 goto Resume_devices;
391}
392
393/**
394 * suspend_finish - Do final work before exiting suspend sequence.
395 *
396 * Call platform code to clean up, restart processes, and free the
397 * console that we've allocated. This is not called for suspend-to-disk.
398 */
399static void suspend_finish(void)
400{
401 suspend_thaw_processes();
402 usermodehelper_enable();
403 pm_notifier_call_chain(PM_POST_SUSPEND);
404 pm_restore_console();
405}
406
407
408
409
410static const char * const pm_states[PM_SUSPEND_MAX] = {
411 [PM_SUSPEND_STANDBY] = "standby",
412 [PM_SUSPEND_MEM] = "mem",
413};
414
415static inline int valid_state(suspend_state_t state)
416{
417 /* All states need lowlevel support and need to be valid
418 * to the lowlevel implementation, no valid callback
419 * implies that none are valid. */
420 if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
421 return 0;
422 return 1;
423}
424
425
426/**
427 * enter_state - Do common work of entering low-power state.
428 * @state: pm_state structure for state we're entering.
429 *
430 * Make sure we're the only ones trying to enter a sleep state. Fail
431 * if someone has beat us to it, since we don't want anything weird to
432 * happen when we wake up.
433 * Then, do the setup for suspend, enter the state, and cleaup (after
434 * we've woken up).
435 */
436static int enter_state(suspend_state_t state)
437{
438 int error;
439
440 if (!valid_state(state))
441 return -ENODEV;
442
443 if (!mutex_trylock(&pm_mutex))
444 return -EBUSY;
445
446 printk(KERN_INFO "PM: Syncing filesystems ... ");
447 sys_sync();
448 printk("done.\n");
449
450 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
451 error = suspend_prepare();
452 if (error)
453 goto Unlock;
454
455 if (suspend_test(TEST_FREEZER))
456 goto Finish;
457
458 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
459 error = suspend_devices_and_enter(state);
460
461 Finish:
462 pr_debug("PM: Finishing wakeup.\n");
463 suspend_finish();
464 Unlock:
465 mutex_unlock(&pm_mutex);
466 return error;
467}
468
469
470/**
471 * pm_suspend - Externally visible function for suspending system.
472 * @state: Enumerated value of state to enter.
473 *
474 * Determine whether or not value is within range, get state
475 * structure, and enter (above).
476 */
477
478int pm_suspend(suspend_state_t state)
479{
480 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
481 return enter_state(state);
482 return -EINVAL;
483}
484
485EXPORT_SYMBOL(pm_suspend);
486
487#endif /* CONFIG_SUSPEND */
488
489struct kobject *power_kobj; 111struct kobject *power_kobj;
490 112
491/** 113/**
@@ -498,7 +120,6 @@ struct kobject *power_kobj;
498 * store() accepts one of those strings, translates it into the 120 * store() accepts one of those strings, translates it into the
499 * proper enumerated value, and initiates a suspend transition. 121 * proper enumerated value, and initiates a suspend transition.
500 */ 122 */
501
502static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 123static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
503 char *buf) 124 char *buf)
504{ 125{
@@ -596,7 +217,6 @@ static struct attribute_group attr_group = {
596 .attrs = g, 217 .attrs = g,
597}; 218};
598 219
599
600static int __init pm_init(void) 220static int __init pm_init(void)
601{ 221{
602 power_kobj = kobject_create_and_add("power", NULL); 222 power_kobj = kobject_create_and_add("power", NULL);
@@ -606,144 +226,3 @@ static int __init pm_init(void)
606} 226}
607 227
608core_initcall(pm_init); 228core_initcall(pm_init);
609
610
611#ifdef CONFIG_PM_TEST_SUSPEND
612
613#include <linux/rtc.h>
614
615/*
616 * To test system suspend, we need a hands-off mechanism to resume the
617 * system. RTCs wake alarms are a common self-contained mechanism.
618 */
619
620static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
621{
622 static char err_readtime[] __initdata =
623 KERN_ERR "PM: can't read %s time, err %d\n";
624 static char err_wakealarm [] __initdata =
625 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
626 static char err_suspend[] __initdata =
627 KERN_ERR "PM: suspend test failed, error %d\n";
628 static char info_test[] __initdata =
629 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
630
631 unsigned long now;
632 struct rtc_wkalrm alm;
633 int status;
634
635 /* this may fail if the RTC hasn't been initialized */
636 status = rtc_read_time(rtc, &alm.time);
637 if (status < 0) {
638 printk(err_readtime, dev_name(&rtc->dev), status);
639 return;
640 }
641 rtc_tm_to_time(&alm.time, &now);
642
643 memset(&alm, 0, sizeof alm);
644 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
645 alm.enabled = true;
646
647 status = rtc_set_alarm(rtc, &alm);
648 if (status < 0) {
649 printk(err_wakealarm, dev_name(&rtc->dev), status);
650 return;
651 }
652
653 if (state == PM_SUSPEND_MEM) {
654 printk(info_test, pm_states[state]);
655 status = pm_suspend(state);
656 if (status == -ENODEV)
657 state = PM_SUSPEND_STANDBY;
658 }
659 if (state == PM_SUSPEND_STANDBY) {
660 printk(info_test, pm_states[state]);
661 status = pm_suspend(state);
662 }
663 if (status < 0)
664 printk(err_suspend, status);
665
666 /* Some platforms can't detect that the alarm triggered the
667 * wakeup, or (accordingly) disable it after it afterwards.
668 * It's supposed to give oneshot behavior; cope.
669 */
670 alm.enabled = false;
671 rtc_set_alarm(rtc, &alm);
672}
673
674static int __init has_wakealarm(struct device *dev, void *name_ptr)
675{
676 struct rtc_device *candidate = to_rtc_device(dev);
677
678 if (!candidate->ops->set_alarm)
679 return 0;
680 if (!device_may_wakeup(candidate->dev.parent))
681 return 0;
682
683 *(const char **)name_ptr = dev_name(dev);
684 return 1;
685}
686
687/*
688 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
689 * at startup time. They're normally disabled, for faster boot and because
690 * we can't know which states really work on this particular system.
691 */
692static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
693
694static char warn_bad_state[] __initdata =
695 KERN_WARNING "PM: can't test '%s' suspend state\n";
696
697static int __init setup_test_suspend(char *value)
698{
699 unsigned i;
700
701 /* "=mem" ==> "mem" */
702 value++;
703 for (i = 0; i < PM_SUSPEND_MAX; i++) {
704 if (!pm_states[i])
705 continue;
706 if (strcmp(pm_states[i], value) != 0)
707 continue;
708 test_state = (__force suspend_state_t) i;
709 return 0;
710 }
711 printk(warn_bad_state, value);
712 return 0;
713}
714__setup("test_suspend", setup_test_suspend);
715
716static int __init test_suspend(void)
717{
718 static char warn_no_rtc[] __initdata =
719 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
720
721 char *pony = NULL;
722 struct rtc_device *rtc = NULL;
723
724 /* PM is initialized by now; is that state testable? */
725 if (test_state == PM_SUSPEND_ON)
726 goto done;
727 if (!valid_state(test_state)) {
728 printk(warn_bad_state, pm_states[test_state]);
729 goto done;
730 }
731
732 /* RTCs have initialized by now too ... can we use one? */
733 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
734 if (pony)
735 rtc = rtc_class_open(pony);
736 if (!rtc) {
737 printk(warn_no_rtc);
738 goto done;
739 }
740
741 /* go for it */
742 test_wakealarm(rtc, test_state);
743 rtc_class_close(rtc);
744done:
745 return 0;
746}
747late_initcall(test_suspend);
748
749#endif /* CONFIG_PM_TEST_SUSPEND */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46b5ec7a3afb..26d5a26f82e3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -45,7 +45,7 @@ static inline char *check_image_kernel(struct swsusp_info *info)
45 */ 45 */
46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) 46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
47 47
48/* kernel/power/disk.c */ 48/* kernel/power/hibernate.c */
49extern int hibernation_snapshot(int platform_mode); 49extern int hibernation_snapshot(int platform_mode);
50extern int hibernation_restore(int platform_mode); 50extern int hibernation_restore(int platform_mode);
51extern int hibernation_platform_enter(void); 51extern int hibernation_platform_enter(void);
@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
74 74
75extern int create_basic_memory_bitmaps(void); 75extern int create_basic_memory_bitmaps(void);
76extern void free_basic_memory_bitmaps(void); 76extern void free_basic_memory_bitmaps(void);
77extern unsigned int count_data_pages(void); 77extern int swsusp_shrink_memory(void);
78 78
79/** 79/**
80 * Auxiliary structure used for reading the snapshot image data and 80 * Auxiliary structure used for reading the snapshot image data and
@@ -147,9 +147,8 @@ extern int swsusp_swap_in_use(void);
147 */ 147 */
148#define SF_PLATFORM_MODE 1 148#define SF_PLATFORM_MODE 1
149 149
150/* kernel/power/disk.c */ 150/* kernel/power/hibernate.c */
151extern int swsusp_check(void); 151extern int swsusp_check(void);
152extern int swsusp_shrink_memory(void);
153extern void swsusp_free(void); 152extern void swsusp_free(void);
154extern int swsusp_read(unsigned int *flags_p); 153extern int swsusp_read(unsigned int *flags_p);
155extern int swsusp_write(unsigned int flags); 154extern int swsusp_write(unsigned int flags);
@@ -161,22 +160,36 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
161 unsigned int, char *); 160 unsigned int, char *);
162 161
163#ifdef CONFIG_SUSPEND 162#ifdef CONFIG_SUSPEND
164/* kernel/power/main.c */ 163/* kernel/power/suspend.c */
164extern const char *const pm_states[];
165
166extern bool valid_state(suspend_state_t state);
165extern int suspend_devices_and_enter(suspend_state_t state); 167extern int suspend_devices_and_enter(suspend_state_t state);
168extern int enter_state(suspend_state_t state);
166#else /* !CONFIG_SUSPEND */ 169#else /* !CONFIG_SUSPEND */
167static inline int suspend_devices_and_enter(suspend_state_t state) 170static inline int suspend_devices_and_enter(suspend_state_t state)
168{ 171{
169 return -ENOSYS; 172 return -ENOSYS;
170} 173}
174static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
175static inline bool valid_state(suspend_state_t state) { return false; }
171#endif /* !CONFIG_SUSPEND */ 176#endif /* !CONFIG_SUSPEND */
172 177
178#ifdef CONFIG_PM_TEST_SUSPEND
179/* kernel/power/suspend_test.c */
180extern void suspend_test_start(void);
181extern void suspend_test_finish(const char *label);
182#else /* !CONFIG_PM_TEST_SUSPEND */
183static inline void suspend_test_start(void) {}
184static inline void suspend_test_finish(const char *label) {}
185#endif /* !CONFIG_PM_TEST_SUSPEND */
186
173#ifdef CONFIG_PM_SLEEP 187#ifdef CONFIG_PM_SLEEP
174/* kernel/power/main.c */ 188/* kernel/power/main.c */
175extern int pm_notifier_call_chain(unsigned long val); 189extern int pm_notifier_call_chain(unsigned long val);
176#endif 190#endif
177 191
178#ifdef CONFIG_HIGHMEM 192#ifdef CONFIG_HIGHMEM
179unsigned int count_highmem_pages(void);
180int restore_highmem(void); 193int restore_highmem(void);
181#else 194#else
182static inline unsigned int count_highmem_pages(void) { return 0; } 195static inline unsigned int count_highmem_pages(void) { return 0; }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 33e2e4a819f9..523a451b45d3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,6 +39,14 @@ static int swsusp_page_is_free(struct page *);
39static void swsusp_set_page_forbidden(struct page *); 39static void swsusp_set_page_forbidden(struct page *);
40static void swsusp_unset_page_forbidden(struct page *); 40static void swsusp_unset_page_forbidden(struct page *);
41 41
42/*
43 * Preferred image size in bytes (tunable via /sys/power/image_size).
44 * When it is set to N, swsusp will do its best to ensure the image
45 * size will not exceed N bytes, but if that is impossible, it will
46 * try to create the smallest image possible.
47 */
48unsigned long image_size = 500 * 1024 * 1024;
49
42/* List of PBEs needed for restoring the pages that were allocated before 50/* List of PBEs needed for restoring the pages that were allocated before
43 * the suspend and included in the suspend image, but have also been 51 * the suspend and included in the suspend image, but have also been
44 * allocated by the "resume" kernel, so their contents cannot be written 52 * allocated by the "resume" kernel, so their contents cannot be written
@@ -840,7 +848,7 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
840 * pages. 848 * pages.
841 */ 849 */
842 850
843unsigned int count_highmem_pages(void) 851static unsigned int count_highmem_pages(void)
844{ 852{
845 struct zone *zone; 853 struct zone *zone;
846 unsigned int n = 0; 854 unsigned int n = 0;
@@ -902,7 +910,7 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
902 * pages. 910 * pages.
903 */ 911 */
904 912
905unsigned int count_data_pages(void) 913static unsigned int count_data_pages(void)
906{ 914{
907 struct zone *zone; 915 struct zone *zone;
908 unsigned long pfn, max_zone_pfn; 916 unsigned long pfn, max_zone_pfn;
@@ -1058,6 +1066,74 @@ void swsusp_free(void)
1058 buffer = NULL; 1066 buffer = NULL;
1059} 1067}
1060 1068
1069/**
1070 * swsusp_shrink_memory - Try to free as much memory as needed
1071 *
1072 * ... but do not OOM-kill anyone
1073 *
1074 * Notice: all userland should be stopped before it is called, or
1075 * livelock is possible.
1076 */
1077
1078#define SHRINK_BITE 10000
1079static inline unsigned long __shrink_memory(long tmp)
1080{
1081 if (tmp > SHRINK_BITE)
1082 tmp = SHRINK_BITE;
1083 return shrink_all_memory(tmp);
1084}
1085
1086int swsusp_shrink_memory(void)
1087{
1088 long tmp;
1089 struct zone *zone;
1090 unsigned long pages = 0;
1091 unsigned int i = 0;
1092 char *p = "-\\|/";
1093 struct timeval start, stop;
1094
1095 printk(KERN_INFO "PM: Shrinking memory... ");
1096 do_gettimeofday(&start);
1097 do {
1098 long size, highmem_size;
1099
1100 highmem_size = count_highmem_pages();
1101 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
1102 tmp = size;
1103 size += highmem_size;
1104 for_each_populated_zone(zone) {
1105 tmp += snapshot_additional_pages(zone);
1106 if (is_highmem(zone)) {
1107 highmem_size -=
1108 zone_page_state(zone, NR_FREE_PAGES);
1109 } else {
1110 tmp -= zone_page_state(zone, NR_FREE_PAGES);
1111 tmp += zone->lowmem_reserve[ZONE_NORMAL];
1112 }
1113 }
1114
1115 if (highmem_size < 0)
1116 highmem_size = 0;
1117
1118 tmp += highmem_size;
1119 if (tmp > 0) {
1120 tmp = __shrink_memory(tmp);
1121 if (!tmp)
1122 return -ENOMEM;
1123 pages += tmp;
1124 } else if (size > image_size / PAGE_SIZE) {
1125 tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
1126 pages += tmp;
1127 }
1128 printk("\b%c", p[i++%4]);
1129 } while (tmp > 0);
1130 do_gettimeofday(&stop);
1131 printk("\bdone (%lu pages freed)\n", pages);
1132 swsusp_show_speed(&start, &stop, pages, "Freed");
1133
1134 return 0;
1135}
1136
1061#ifdef CONFIG_HIGHMEM 1137#ifdef CONFIG_HIGHMEM
1062/** 1138/**
1063 * count_pages_for_highmem - compute the number of non-highmem pages 1139 * count_pages_for_highmem - compute the number of non-highmem pages
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
new file mode 100644
index 000000000000..6f10dfc2d3e9
--- /dev/null
+++ b/kernel/power/suspend.c
@@ -0,0 +1,300 @@
1/*
2 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/string.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/console.h>
16#include <linux/cpu.h>
17#include <linux/syscalls.h>
18
19#include "power.h"
20
21const char *const pm_states[PM_SUSPEND_MAX] = {
22 [PM_SUSPEND_STANDBY] = "standby",
23 [PM_SUSPEND_MEM] = "mem",
24};
25
26static struct platform_suspend_ops *suspend_ops;
27
28/**
29 * suspend_set_ops - Set the global suspend method table.
30 * @ops: Pointer to ops structure.
31 */
32void suspend_set_ops(struct platform_suspend_ops *ops)
33{
34 mutex_lock(&pm_mutex);
35 suspend_ops = ops;
36 mutex_unlock(&pm_mutex);
37}
38
39bool valid_state(suspend_state_t state)
40{
41 /*
42 * All states need lowlevel support and need to be valid to the lowlevel
43 * implementation, no valid callback implies that none are valid.
44 */
45 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
46}
47
48/**
49 * suspend_valid_only_mem - generic memory-only valid callback
50 *
51 * Platform drivers that implement mem suspend only and only need
52 * to check for that in their .valid callback can use this instead
53 * of rolling their own .valid callback.
54 */
55int suspend_valid_only_mem(suspend_state_t state)
56{
57 return state == PM_SUSPEND_MEM;
58}
59
60static int suspend_test(int level)
61{
62#ifdef CONFIG_PM_DEBUG
63 if (pm_test_level == level) {
64 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
65 mdelay(5000);
66 return 1;
67 }
68#endif /* !CONFIG_PM_DEBUG */
69 return 0;
70}
71
72/**
73 * suspend_prepare - Do prep work before entering low-power state.
74 *
75 * This is common code that is called for each state that we're entering.
76 * Run suspend notifiers, allocate a console and stop all processes.
77 */
78static int suspend_prepare(void)
79{
80 int error;
81
82 if (!suspend_ops || !suspend_ops->enter)
83 return -EPERM;
84
85 pm_prepare_console();
86
87 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
88 if (error)
89 goto Finish;
90
91 error = usermodehelper_disable();
92 if (error)
93 goto Finish;
94
95 error = suspend_freeze_processes();
96 if (!error)
97 return 0;
98
99 suspend_thaw_processes();
100 usermodehelper_enable();
101 Finish:
102 pm_notifier_call_chain(PM_POST_SUSPEND);
103 pm_restore_console();
104 return error;
105}
106
107/* default implementation */
108void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
109{
110 local_irq_disable();
111}
112
113/* default implementation */
114void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
115{
116 local_irq_enable();
117}
118
119/**
120 * suspend_enter - enter the desired system sleep state.
121 * @state: state to enter
122 *
123 * This function should be called after devices have been suspended.
124 */
125static int suspend_enter(suspend_state_t state)
126{
127 int error;
128
129 if (suspend_ops->prepare) {
130 error = suspend_ops->prepare();
131 if (error)
132 return error;
133 }
134
135 error = dpm_suspend_noirq(PMSG_SUSPEND);
136 if (error) {
137 printk(KERN_ERR "PM: Some devices failed to power down\n");
138 goto Platfrom_finish;
139 }
140
141 if (suspend_ops->prepare_late) {
142 error = suspend_ops->prepare_late();
143 if (error)
144 goto Power_up_devices;
145 }
146
147 if (suspend_test(TEST_PLATFORM))
148 goto Platform_wake;
149
150 error = disable_nonboot_cpus();
151 if (error || suspend_test(TEST_CPUS))
152 goto Enable_cpus;
153
154 arch_suspend_disable_irqs();
155 BUG_ON(!irqs_disabled());
156
157 error = sysdev_suspend(PMSG_SUSPEND);
158 if (!error) {
159 if (!suspend_test(TEST_CORE))
160 error = suspend_ops->enter(state);
161 sysdev_resume();
162 }
163
164 arch_suspend_enable_irqs();
165 BUG_ON(irqs_disabled());
166
167 Enable_cpus:
168 enable_nonboot_cpus();
169
170 Platform_wake:
171 if (suspend_ops->wake)
172 suspend_ops->wake();
173
174 Power_up_devices:
175 dpm_resume_noirq(PMSG_RESUME);
176
177 Platfrom_finish:
178 if (suspend_ops->finish)
179 suspend_ops->finish();
180
181 return error;
182}
183
184/**
185 * suspend_devices_and_enter - suspend devices and enter the desired system
186 * sleep state.
187 * @state: state to enter
188 */
189int suspend_devices_and_enter(suspend_state_t state)
190{
191 int error;
192
193 if (!suspend_ops)
194 return -ENOSYS;
195
196 if (suspend_ops->begin) {
197 error = suspend_ops->begin(state);
198 if (error)
199 goto Close;
200 }
201 suspend_console();
202 suspend_test_start();
203 error = dpm_suspend_start(PMSG_SUSPEND);
204 if (error) {
205 printk(KERN_ERR "PM: Some devices failed to suspend\n");
206 goto Recover_platform;
207 }
208 suspend_test_finish("suspend devices");
209 if (suspend_test(TEST_DEVICES))
210 goto Recover_platform;
211
212 suspend_enter(state);
213
214 Resume_devices:
215 suspend_test_start();
216 dpm_resume_end(PMSG_RESUME);
217 suspend_test_finish("resume devices");
218 resume_console();
219 Close:
220 if (suspend_ops->end)
221 suspend_ops->end();
222 return error;
223
224 Recover_platform:
225 if (suspend_ops->recover)
226 suspend_ops->recover();
227 goto Resume_devices;
228}
229
230/**
231 * suspend_finish - Do final work before exiting suspend sequence.
232 *
233 * Call platform code to clean up, restart processes, and free the
234 * console that we've allocated. This is not called for suspend-to-disk.
235 */
236static void suspend_finish(void)
237{
238 suspend_thaw_processes();
239 usermodehelper_enable();
240 pm_notifier_call_chain(PM_POST_SUSPEND);
241 pm_restore_console();
242}
243
244/**
245 * enter_state - Do common work of entering low-power state.
246 * @state: pm_state structure for state we're entering.
247 *
248 * Make sure we're the only ones trying to enter a sleep state. Fail
249 * if someone has beat us to it, since we don't want anything weird to
250 * happen when we wake up.
251 * Then, do the setup for suspend, enter the state, and cleaup (after
252 * we've woken up).
253 */
254int enter_state(suspend_state_t state)
255{
256 int error;
257
258 if (!valid_state(state))
259 return -ENODEV;
260
261 if (!mutex_trylock(&pm_mutex))
262 return -EBUSY;
263
264 printk(KERN_INFO "PM: Syncing filesystems ... ");
265 sys_sync();
266 printk("done.\n");
267
268 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
269 error = suspend_prepare();
270 if (error)
271 goto Unlock;
272
273 if (suspend_test(TEST_FREEZER))
274 goto Finish;
275
276 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
277 error = suspend_devices_and_enter(state);
278
279 Finish:
280 pr_debug("PM: Finishing wakeup.\n");
281 suspend_finish();
282 Unlock:
283 mutex_unlock(&pm_mutex);
284 return error;
285}
286
287/**
288 * pm_suspend - Externally visible function for suspending system.
289 * @state: Enumerated value of state to enter.
290 *
291 * Determine whether or not value is within range, get state
292 * structure, and enter (above).
293 */
294int pm_suspend(suspend_state_t state)
295{
296 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
297 return enter_state(state);
298 return -EINVAL;
299}
300EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
new file mode 100644
index 000000000000..17d8bb1acf9c
--- /dev/null
+++ b/kernel/power/suspend_test.c
@@ -0,0 +1,187 @@
1/*
2 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
3 *
4 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/rtc.h>
11
12#include "power.h"
13
14/*
15 * We test the system suspend code by setting an RTC wakealarm a short
16 * time in the future, then suspending. Suspending the devices won't
17 * normally take long ... some systems only need a few milliseconds.
18 *
19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time.
21 */
22#define TEST_SUSPEND_SECONDS 5
23
24static unsigned long suspend_test_start_time;
25
26void suspend_test_start(void)
27{
28 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
29 * What we want is a hardware counter that will work correctly even
30 * during the irqs-are-off stages of the suspend/resume cycle...
31 */
32 suspend_test_start_time = jiffies;
33}
34
35void suspend_test_finish(const char *label)
36{
37 long nj = jiffies - suspend_test_start_time;
38 unsigned msec;
39
40 msec = jiffies_to_msecs(abs(nj));
41 pr_info("PM: %s took %d.%03d seconds\n", label,
42 msec / 1000, msec % 1000);
43
44 /* Warning on suspend means the RTC alarm period needs to be
45 * larger -- the system was sooo slooowwww to suspend that the
46 * alarm (should have) fired before the system went to sleep!
47 *
48 * Warning on either suspend or resume also means the system
49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk...
51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
53}
54
55/*
56 * To test system suspend, we need a hands-off mechanism to resume the
57 * system. RTCs wake alarms are a common self-contained mechanism.
58 */
59
60static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
61{
62 static char err_readtime[] __initdata =
63 KERN_ERR "PM: can't read %s time, err %d\n";
64 static char err_wakealarm [] __initdata =
65 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
66 static char err_suspend[] __initdata =
67 KERN_ERR "PM: suspend test failed, error %d\n";
68 static char info_test[] __initdata =
69 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
70
71 unsigned long now;
72 struct rtc_wkalrm alm;
73 int status;
74
75 /* this may fail if the RTC hasn't been initialized */
76 status = rtc_read_time(rtc, &alm.time);
77 if (status < 0) {
78 printk(err_readtime, dev_name(&rtc->dev), status);
79 return;
80 }
81 rtc_tm_to_time(&alm.time, &now);
82
83 memset(&alm, 0, sizeof alm);
84 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
85 alm.enabled = true;
86
87 status = rtc_set_alarm(rtc, &alm);
88 if (status < 0) {
89 printk(err_wakealarm, dev_name(&rtc->dev), status);
90 return;
91 }
92
93 if (state == PM_SUSPEND_MEM) {
94 printk(info_test, pm_states[state]);
95 status = pm_suspend(state);
96 if (status == -ENODEV)
97 state = PM_SUSPEND_STANDBY;
98 }
99 if (state == PM_SUSPEND_STANDBY) {
100 printk(info_test, pm_states[state]);
101 status = pm_suspend(state);
102 }
103 if (status < 0)
104 printk(err_suspend, status);
105
106 /* Some platforms can't detect that the alarm triggered the
107 * wakeup, or (accordingly) disable it after it afterwards.
108 * It's supposed to give oneshot behavior; cope.
109 */
110 alm.enabled = false;
111 rtc_set_alarm(rtc, &alm);
112}
113
114static int __init has_wakealarm(struct device *dev, void *name_ptr)
115{
116 struct rtc_device *candidate = to_rtc_device(dev);
117
118 if (!candidate->ops->set_alarm)
119 return 0;
120 if (!device_may_wakeup(candidate->dev.parent))
121 return 0;
122
123 *(const char **)name_ptr = dev_name(dev);
124 return 1;
125}
126
127/*
128 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
129 * at startup time. They're normally disabled, for faster boot and because
130 * we can't know which states really work on this particular system.
131 */
132static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
133
134static char warn_bad_state[] __initdata =
135 KERN_WARNING "PM: can't test '%s' suspend state\n";
136
137static int __init setup_test_suspend(char *value)
138{
139 unsigned i;
140
141 /* "=mem" ==> "mem" */
142 value++;
143 for (i = 0; i < PM_SUSPEND_MAX; i++) {
144 if (!pm_states[i])
145 continue;
146 if (strcmp(pm_states[i], value) != 0)
147 continue;
148 test_state = (__force suspend_state_t) i;
149 return 0;
150 }
151 printk(warn_bad_state, value);
152 return 0;
153}
154__setup("test_suspend", setup_test_suspend);
155
156static int __init test_suspend(void)
157{
158 static char warn_no_rtc[] __initdata =
159 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
160
161 char *pony = NULL;
162 struct rtc_device *rtc = NULL;
163
164 /* PM is initialized by now; is that state testable? */
165 if (test_state == PM_SUSPEND_ON)
166 goto done;
167 if (!valid_state(test_state)) {
168 printk(warn_bad_state, pm_states[test_state]);
169 goto done;
170 }
171
172 /* RTCs have initialized by now too ... can we use one? */
173 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
174 if (pony)
175 rtc = rtc_class_open(pony);
176 if (!rtc) {
177 printk(warn_no_rtc);
178 goto done;
179 }
180
181 /* go for it */
182 test_wakealarm(rtc, test_state);
183 rtc_class_close(rtc);
184done:
185 return 0;
186}
187late_initcall(test_suspend);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 78c35047586d..6a07f4dbf2f8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -55,14 +55,6 @@
55 55
56#include "power.h" 56#include "power.h"
57 57
58/*
59 * Preferred image size in bytes (tunable via /sys/power/image_size).
60 * When it is set to N, swsusp will do its best to ensure the image
61 * size will not exceed N bytes, but if that is impossible, it will
62 * try to create the smallest image possible.
63 */
64unsigned long image_size = 500 * 1024 * 1024;
65
66int in_suspend __nosavedata = 0; 58int in_suspend __nosavedata = 0;
67 59
68/** 60/**
@@ -194,193 +186,3 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
194 centisecs / 100, centisecs % 100, 186 centisecs / 100, centisecs % 100,
195 kps / 1000, (kps % 1000) / 10); 187 kps / 1000, (kps % 1000) / 10);
196} 188}
197
198/**
199 * swsusp_shrink_memory - Try to free as much memory as needed
200 *
201 * ... but do not OOM-kill anyone
202 *
203 * Notice: all userland should be stopped before it is called, or
204 * livelock is possible.
205 */
206
207#define SHRINK_BITE 10000
208static inline unsigned long __shrink_memory(long tmp)
209{
210 if (tmp > SHRINK_BITE)
211 tmp = SHRINK_BITE;
212 return shrink_all_memory(tmp);
213}
214
215int swsusp_shrink_memory(void)
216{
217 long tmp;
218 struct zone *zone;
219 unsigned long pages = 0;
220 unsigned int i = 0;
221 char *p = "-\\|/";
222 struct timeval start, stop;
223
224 printk(KERN_INFO "PM: Shrinking memory... ");
225 do_gettimeofday(&start);
226 do {
227 long size, highmem_size;
228
229 highmem_size = count_highmem_pages();
230 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
231 tmp = size;
232 size += highmem_size;
233 for_each_populated_zone(zone) {
234 tmp += snapshot_additional_pages(zone);
235 if (is_highmem(zone)) {
236 highmem_size -=
237 zone_page_state(zone, NR_FREE_PAGES);
238 } else {
239 tmp -= zone_page_state(zone, NR_FREE_PAGES);
240 tmp += zone->lowmem_reserve[ZONE_NORMAL];
241 }
242 }
243
244 if (highmem_size < 0)
245 highmem_size = 0;
246
247 tmp += highmem_size;
248 if (tmp > 0) {
249 tmp = __shrink_memory(tmp);
250 if (!tmp)
251 return -ENOMEM;
252 pages += tmp;
253 } else if (size > image_size / PAGE_SIZE) {
254 tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
255 pages += tmp;
256 }
257 printk("\b%c", p[i++%4]);
258 } while (tmp > 0);
259 do_gettimeofday(&stop);
260 printk("\bdone (%lu pages freed)\n", pages);
261 swsusp_show_speed(&start, &stop, pages, "Freed");
262
263 return 0;
264}
265
266/*
267 * Platforms, like ACPI, may want us to save some memory used by them during
268 * hibernation and to restore the contents of this memory during the subsequent
269 * resume. The code below implements a mechanism allowing us to do that.
270 */
271
272struct nvs_page {
273 unsigned long phys_start;
274 unsigned int size;
275 void *kaddr;
276 void *data;
277 struct list_head node;
278};
279
280static LIST_HEAD(nvs_list);
281
282/**
283 * hibernate_nvs_register - register platform NVS memory region to save
284 * @start - physical address of the region
285 * @size - size of the region
286 *
287 * The NVS region need not be page-aligned (both ends) and we arrange
288 * things so that the data from page-aligned addresses in this region will
289 * be copied into separate RAM pages.
290 */
291int hibernate_nvs_register(unsigned long start, unsigned long size)
292{
293 struct nvs_page *entry, *next;
294
295 while (size > 0) {
296 unsigned int nr_bytes;
297
298 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
299 if (!entry)
300 goto Error;
301
302 list_add_tail(&entry->node, &nvs_list);
303 entry->phys_start = start;
304 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
305 entry->size = (size < nr_bytes) ? size : nr_bytes;
306
307 start += entry->size;
308 size -= entry->size;
309 }
310 return 0;
311
312 Error:
313 list_for_each_entry_safe(entry, next, &nvs_list, node) {
314 list_del(&entry->node);
315 kfree(entry);
316 }
317 return -ENOMEM;
318}
319
320/**
321 * hibernate_nvs_free - free data pages allocated for saving NVS regions
322 */
323void hibernate_nvs_free(void)
324{
325 struct nvs_page *entry;
326
327 list_for_each_entry(entry, &nvs_list, node)
328 if (entry->data) {
329 free_page((unsigned long)entry->data);
330 entry->data = NULL;
331 if (entry->kaddr) {
332 iounmap(entry->kaddr);
333 entry->kaddr = NULL;
334 }
335 }
336}
337
338/**
339 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
340 */
341int hibernate_nvs_alloc(void)
342{
343 struct nvs_page *entry;
344
345 list_for_each_entry(entry, &nvs_list, node) {
346 entry->data = (void *)__get_free_page(GFP_KERNEL);
347 if (!entry->data) {
348 hibernate_nvs_free();
349 return -ENOMEM;
350 }
351 }
352 return 0;
353}
354
355/**
356 * hibernate_nvs_save - save NVS memory regions
357 */
358void hibernate_nvs_save(void)
359{
360 struct nvs_page *entry;
361
362 printk(KERN_INFO "PM: Saving platform NVS memory\n");
363
364 list_for_each_entry(entry, &nvs_list, node)
365 if (entry->data) {
366 entry->kaddr = ioremap(entry->phys_start, entry->size);
367 memcpy(entry->data, entry->kaddr, entry->size);
368 }
369}
370
371/**
372 * hibernate_nvs_restore - restore NVS memory regions
373 *
374 * This function is going to be called with interrupts disabled, so it
375 * cannot iounmap the virtual addresses used to access the NVS region.
376 */
377void hibernate_nvs_restore(void)
378{
379 struct nvs_page *entry;
380
381 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
382
383 list_for_each_entry(entry, &nvs_list, node)
384 if (entry->data)
385 memcpy(entry->kaddr, entry->data, entry->size);
386}
diff --git a/kernel/sched.c b/kernel/sched.c
index f04aa9664504..8ec9d13140be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2192,6 +2192,7 @@ void kick_process(struct task_struct *p)
2192 smp_send_reschedule(cpu); 2192 smp_send_reschedule(cpu);
2193 preempt_enable(); 2193 preempt_enable();
2194} 2194}
2195EXPORT_SYMBOL_GPL(kick_process);
2195 2196
2196/* 2197/*
2197 * Return a low guess at the load of a migration-source cpu weighted 2198 * Return a low guess at the load of a migration-source cpu weighted
diff --git a/lib/extable.c b/lib/extable.c
index 179c08745595..4cac81ec225e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
39 sort(start, finish - start, sizeof(struct exception_table_entry), 39 sort(start, finish - start, sizeof(struct exception_table_entry),
40 cmp_ex, NULL); 40 cmp_ex, NULL);
41} 41}
42#endif 42
43#ifdef CONFIG_MODULES
44/*
45 * If the exception table is sorted, any referring to the module init
46 * will be at the beginning or the end.
47 */
48void trim_init_extable(struct module *m)
49{
50 /*trim the beginning*/
51 while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
52 m->extable++;
53 m->num_exentries--;
54 }
55 /*trim the end*/
56 while (m->num_exentries &&
57 within_module_init(m->extable[m->num_exentries-1].insn, m))
58 m->num_exentries--;
59}
60#endif /* CONFIG_MODULES */
61#endif /* !ARCH_HAS_SORT_EXTABLE */
43 62
44#ifndef ARCH_HAS_SEARCH_EXTABLE 63#ifndef ARCH_HAS_SEARCH_EXTABLE
45/* 64/*
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
47 struct page_cgroup *base, *pc; 47 struct page_cgroup *base, *pc;
48 unsigned long table_size; 48 unsigned long table_size;
49 unsigned long start_pfn, nr_pages, index; 49 unsigned long start_pfn, nr_pages, index;
50 struct page *page;
51 unsigned int order;
52 50
53 start_pfn = NODE_DATA(nid)->node_start_pfn; 51 start_pfn = NODE_DATA(nid)->node_start_pfn;
54 nr_pages = NODE_DATA(nid)->node_spanned_pages; 52 nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
57 return 0; 55 return 0;
58 56
59 table_size = sizeof(struct page_cgroup) * nr_pages; 57 table_size = sizeof(struct page_cgroup) * nr_pages;
60 order = get_order(table_size); 58
61 page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order); 59 base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
62 if (!page) 60 table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
63 page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order); 61 if (!base)
64 if (!page)
65 return -ENOMEM; 62 return -ENOMEM;
66 base = page_address(page);
67 for (index = 0; index < nr_pages; index++) { 63 for (index = 0; index < nr_pages; index++) {
68 pc = base + index; 64 pc = base + index;
69 __init_page_cgroup(pc, start_pfn + index); 65 __init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
73 return 0; 69 return 0;
74} 70}
75 71
76void __init page_cgroup_init(void) 72void __init page_cgroup_init_flatmem(void)
77{ 73{
78 74
79 int nid, fail; 75 int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
117 if (!section->page_cgroup) { 113 if (!section->page_cgroup) {
118 nid = page_to_nid(pfn_to_page(pfn)); 114 nid = page_to_nid(pfn_to_page(pfn));
119 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 115 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
120 if (slab_is_available()) { 116 VM_BUG_ON(!slab_is_available());
121 base = kmalloc_node(table_size, 117 base = kmalloc_node(table_size,
122 GFP_KERNEL | __GFP_NOWARN, nid); 118 GFP_KERNEL | __GFP_NOWARN, nid);
123 if (!base) 119 if (!base)
124 base = vmalloc_node(table_size, nid); 120 base = vmalloc_node(table_size, nid);
125 } else {
126 base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
127 table_size,
128 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
129 }
130 } else { 121 } else {
131 /* 122 /*
132 * We don't have to allocate page_cgroup again, but 123 * We don't have to allocate page_cgroup again, but
diff --git a/mm/slab.c b/mm/slab.c
index f46b65d124e5..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
304}; 304};
305 305
306/* 306/*
307 * The slab allocator is initialized with interrupts disabled. Therefore, make
308 * sure early boot allocations don't accidentally enable interrupts.
309 */
310static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
311
312/*
307 * Need this for bootstrapping a per node allocator. 313 * Need this for bootstrapping a per node allocator.
308 */ 314 */
309#define NUM_INIT_LISTS (3 * MAX_NUMNODES) 315#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -753,6 +759,7 @@ static enum {
753 NONE, 759 NONE,
754 PARTIAL_AC, 760 PARTIAL_AC,
755 PARTIAL_L3, 761 PARTIAL_L3,
762 EARLY,
756 FULL 763 FULL
757} g_cpucache_up; 764} g_cpucache_up;
758 765
@@ -761,7 +768,7 @@ static enum {
761 */ 768 */
762int slab_is_available(void) 769int slab_is_available(void)
763{ 770{
764 return g_cpucache_up == FULL; 771 return g_cpucache_up >= EARLY;
765} 772}
766 773
767static DEFINE_PER_CPU(struct delayed_work, reap_work); 774static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1625,19 +1632,27 @@ void __init kmem_cache_init(void)
1625 } 1632 }
1626 } 1633 }
1627 1634
1628 /* 6) resize the head arrays to their final sizes */ 1635 g_cpucache_up = EARLY;
1629 {
1630 struct kmem_cache *cachep;
1631 mutex_lock(&cache_chain_mutex);
1632 list_for_each_entry(cachep, &cache_chain, next)
1633 if (enable_cpucache(cachep, GFP_NOWAIT))
1634 BUG();
1635 mutex_unlock(&cache_chain_mutex);
1636 }
1637 1636
1638 /* Annotate slab for lockdep -- annotate the malloc caches */ 1637 /* Annotate slab for lockdep -- annotate the malloc caches */
1639 init_lock_keys(); 1638 init_lock_keys();
1639}
1640
1641void __init kmem_cache_init_late(void)
1642{
1643 struct kmem_cache *cachep;
1644
1645 /*
1646 * Interrupts are enabled now so all GFP allocations are safe.
1647 */
1648 slab_gfp_mask = __GFP_BITS_MASK;
1640 1649
1650 /* 6) resize the head arrays to their final sizes */
1651 mutex_lock(&cache_chain_mutex);
1652 list_for_each_entry(cachep, &cache_chain, next)
1653 if (enable_cpucache(cachep, GFP_NOWAIT))
1654 BUG();
1655 mutex_unlock(&cache_chain_mutex);
1641 1656
1642 /* Done! */ 1657 /* Done! */
1643 g_cpucache_up = FULL; 1658 g_cpucache_up = FULL;
@@ -2102,7 +2117,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2102 for_each_online_node(node) { 2117 for_each_online_node(node) {
2103 cachep->nodelists[node] = 2118 cachep->nodelists[node] =
2104 kmalloc_node(sizeof(struct kmem_list3), 2119 kmalloc_node(sizeof(struct kmem_list3),
2105 GFP_KERNEL, node); 2120 gfp, node);
2106 BUG_ON(!cachep->nodelists[node]); 2121 BUG_ON(!cachep->nodelists[node]);
2107 kmem_list3_init(cachep->nodelists[node]); 2122 kmem_list3_init(cachep->nodelists[node]);
2108 } 2123 }
@@ -3354,6 +3369,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3354 unsigned long save_flags; 3369 unsigned long save_flags;
3355 void *ptr; 3370 void *ptr;
3356 3371
3372 flags &= slab_gfp_mask;
3373
3357 lockdep_trace_alloc(flags); 3374 lockdep_trace_alloc(flags);
3358 3375
3359 if (slab_should_failslab(cachep, flags)) 3376 if (slab_should_failslab(cachep, flags))
@@ -3434,6 +3451,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3434 unsigned long save_flags; 3451 unsigned long save_flags;
3435 void *objp; 3452 void *objp;
3436 3453
3454 flags &= slab_gfp_mask;
3455
3437 lockdep_trace_alloc(flags); 3456 lockdep_trace_alloc(flags);
3438 3457
3439 if (slab_should_failslab(cachep, flags)) 3458 if (slab_should_failslab(cachep, flags))
diff --git a/mm/slub.c b/mm/slub.c
index 3964d3ce4c15..30354bfeb43d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -178,6 +178,12 @@ static enum {
178 SYSFS /* Sysfs up */ 178 SYSFS /* Sysfs up */
179} slab_state = DOWN; 179} slab_state = DOWN;
180 180
181/*
182 * The slab allocator is initialized with interrupts disabled. Therefore, make
183 * sure early boot allocations don't accidentally enable interrupts.
184 */
185static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
186
181/* A list of all slab caches on the system */ 187/* A list of all slab caches on the system */
182static DECLARE_RWSEM(slub_lock); 188static DECLARE_RWSEM(slub_lock);
183static LIST_HEAD(slab_caches); 189static LIST_HEAD(slab_caches);
@@ -1595,6 +1601,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1595 unsigned long flags; 1601 unsigned long flags;
1596 unsigned int objsize; 1602 unsigned int objsize;
1597 1603
1604 gfpflags &= slab_gfp_mask;
1605
1598 lockdep_trace_alloc(gfpflags); 1606 lockdep_trace_alloc(gfpflags);
1599 might_sleep_if(gfpflags & __GFP_WAIT); 1607 might_sleep_if(gfpflags & __GFP_WAIT);
1600 1608
@@ -3104,6 +3112,14 @@ void __init kmem_cache_init(void)
3104 nr_cpu_ids, nr_node_ids); 3112 nr_cpu_ids, nr_node_ids);
3105} 3113}
3106 3114
3115void __init kmem_cache_init_late(void)
3116{
3117 /*
3118 * Interrupts are enabled now so all GFP allocations are safe.
3119 */
3120 slab_gfp_mask = __GFP_BITS_MASK;
3121}
3122
3107/* 3123/*
3108 * Find a mergeable slab cache 3124 * Find a mergeable slab cache
3109 */ 3125 */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d254306562cd..95c08a8cc2ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
2056 + global_page_state(NR_INACTIVE_FILE); 2056 + global_page_state(NR_INACTIVE_FILE);
2057} 2057}
2058 2058
2059#ifdef CONFIG_PM 2059#ifdef CONFIG_HIBERNATION
2060/* 2060/*
2061 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages 2061 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
2062 * from LRU lists system-wide, for given pass and priority. 2062 * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
2196 2196
2197 return sc.nr_reclaimed; 2197 return sc.nr_reclaimed;
2198} 2198}
2199#endif 2199#endif /* CONFIG_HIBERNATION */
2200 2200
2201/* It's optimal to keep kswapds on the same CPUs as their memory, but 2201/* It's optimal to keep kswapds on the same CPUs as their memory, but
2202 not required for correctness. So if the last cpu in a node goes 2202 not required for correctness. So if the last cpu in a node goes
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index bb8579a141a8..a49484e67e1d 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
246 chan->vdev = vdev; 246 chan->vdev = vdev;
247 247
248 /* We expect one virtqueue, for requests. */ 248 /* We expect one virtqueue, for requests. */
249 chan->vq = vdev->config->find_vq(vdev, 0, req_done); 249 chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
250 if (IS_ERR(chan->vq)) { 250 if (IS_ERR(chan->vq)) {
251 err = PTR_ERR(chan->vq); 251 err = PTR_ERR(chan->vq);
252 goto out_free_vq; 252 goto out_free_vq;
@@ -261,7 +261,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
261 return 0; 261 return 0;
262 262
263out_free_vq: 263out_free_vq:
264 vdev->config->del_vq(chan->vq); 264 vdev->config->del_vqs(vdev);
265fail: 265fail:
266 mutex_lock(&virtio_9p_lock); 266 mutex_lock(&virtio_9p_lock);
267 chan_index--; 267 chan_index--;
@@ -332,7 +332,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
332 BUG_ON(chan->inuse); 332 BUG_ON(chan->inuse);
333 333
334 if (chan->initialized) { 334 if (chan->initialized) {
335 vdev->config->del_vq(chan->vq); 335 vdev->config->del_vqs(vdev);
336 chan->initialized = false; 336 chan->initialized = false;
337 } 337 }
338} 338}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index a3344285ccf4..40e0045876ee 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -641,7 +641,7 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
641 id->vendor = TO_NATIVE(id->vendor); 641 id->vendor = TO_NATIVE(id->vendor);
642 642
643 strcpy(alias, "virtio:"); 643 strcpy(alias, "virtio:");
644 ADD(alias, "d", 1, id->device); 644 ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device);
645 ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor); 645 ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);
646 646
647 add_wildcard(alias); 647 add_wildcard(alias);
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index fbf5c933baa4..586965f9605f 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -1037,7 +1037,7 @@ static int aoa_fabric_layout_probe(struct soundbus_dev *sdev)
1037 } 1037 }
1038 ldev->selfptr_headphone.ptr = ldev; 1038 ldev->selfptr_headphone.ptr = ldev;
1039 ldev->selfptr_lineout.ptr = ldev; 1039 ldev->selfptr_lineout.ptr = ldev;
1040 sdev->ofdev.dev.driver_data = ldev; 1040 dev_set_drvdata(&sdev->ofdev.dev, ldev);
1041 list_add(&ldev->list, &layouts_list); 1041 list_add(&ldev->list, &layouts_list);
1042 layouts_list_items++; 1042 layouts_list_items++;
1043 1043
@@ -1081,7 +1081,7 @@ static int aoa_fabric_layout_probe(struct soundbus_dev *sdev)
1081 1081
1082static int aoa_fabric_layout_remove(struct soundbus_dev *sdev) 1082static int aoa_fabric_layout_remove(struct soundbus_dev *sdev)
1083{ 1083{
1084 struct layout_dev *ldev = sdev->ofdev.dev.driver_data; 1084 struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
1085 int i; 1085 int i;
1086 1086
1087 for (i=0; i<MAX_CODECS_PER_BUS; i++) { 1087 for (i=0; i<MAX_CODECS_PER_BUS; i++) {
@@ -1114,7 +1114,7 @@ static int aoa_fabric_layout_remove(struct soundbus_dev *sdev)
1114#ifdef CONFIG_PM 1114#ifdef CONFIG_PM
1115static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state) 1115static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state)
1116{ 1116{
1117 struct layout_dev *ldev = sdev->ofdev.dev.driver_data; 1117 struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
1118 1118
1119 if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off) 1119 if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
1120 ldev->gpio.methods->all_amps_off(&ldev->gpio); 1120 ldev->gpio.methods->all_amps_off(&ldev->gpio);
@@ -1124,7 +1124,7 @@ static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t sta
1124 1124
1125static int aoa_fabric_layout_resume(struct soundbus_dev *sdev) 1125static int aoa_fabric_layout_resume(struct soundbus_dev *sdev)
1126{ 1126{
1127 struct layout_dev *ldev = sdev->ofdev.dev.driver_data; 1127 struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
1128 1128
1129 if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off) 1129 if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
1130 ldev->gpio.methods->all_amps_restore(&ldev->gpio); 1130 ldev->gpio.methods->all_amps_restore(&ldev->gpio);
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 418c84c99d69..4e3b819d4993 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -358,14 +358,14 @@ static int i2sbus_probe(struct macio_dev* dev, const struct of_device_id *match)
358 return -ENODEV; 358 return -ENODEV;
359 } 359 }
360 360
361 dev->ofdev.dev.driver_data = control; 361 dev_set_drvdata(&dev->ofdev.dev, control);
362 362
363 return 0; 363 return 0;
364} 364}
365 365
366static int i2sbus_remove(struct macio_dev* dev) 366static int i2sbus_remove(struct macio_dev* dev)
367{ 367{
368 struct i2sbus_control *control = dev->ofdev.dev.driver_data; 368 struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
369 struct i2sbus_dev *i2sdev, *tmp; 369 struct i2sbus_dev *i2sdev, *tmp;
370 370
371 list_for_each_entry_safe(i2sdev, tmp, &control->list, item) 371 list_for_each_entry_safe(i2sdev, tmp, &control->list, item)
@@ -377,7 +377,7 @@ static int i2sbus_remove(struct macio_dev* dev)
377#ifdef CONFIG_PM 377#ifdef CONFIG_PM
378static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state) 378static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
379{ 379{
380 struct i2sbus_control *control = dev->ofdev.dev.driver_data; 380 struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
381 struct codec_info_item *cii; 381 struct codec_info_item *cii;
382 struct i2sbus_dev* i2sdev; 382 struct i2sbus_dev* i2sdev;
383 int err, ret = 0; 383 int err, ret = 0;
@@ -407,7 +407,7 @@ static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
407 407
408static int i2sbus_resume(struct macio_dev* dev) 408static int i2sbus_resume(struct macio_dev* dev)
409{ 409{
410 struct i2sbus_control *control = dev->ofdev.dev.driver_data; 410 struct i2sbus_control *control = dev_get_drvdata(&dev->ofdev.dev);
411 struct codec_info_item *cii; 411 struct codec_info_item *cii;
412 struct i2sbus_dev* i2sdev; 412 struct i2sbus_dev* i2sdev;
413 int err, ret = 0; 413 int err, ret = 0;
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 7bbdda041a99..6061fb5f4e1c 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -205,3 +205,5 @@ config SND_PCM_XRUN_DEBUG
205 205
206config SND_VMASTER 206config SND_VMASTER
207 bool 207 bool
208
209source "sound/core/seq/Kconfig"
diff --git a/sound/core/init.c b/sound/core/init.c
index fd56afe846ed..d5d40d78c409 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -152,15 +152,8 @@ int snd_card_create(int idx, const char *xid,
152 card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL); 152 card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL);
153 if (!card) 153 if (!card)
154 return -ENOMEM; 154 return -ENOMEM;
155 if (xid) { 155 if (xid)
156 if (!snd_info_check_reserved_words(xid)) {
157 snd_printk(KERN_ERR
158 "given id string '%s' is reserved.\n", xid);
159 err = -EBUSY;
160 goto __error;
161 }
162 strlcpy(card->id, xid, sizeof(card->id)); 156 strlcpy(card->id, xid, sizeof(card->id));
163 }
164 err = 0; 157 err = 0;
165 mutex_lock(&snd_card_mutex); 158 mutex_lock(&snd_card_mutex);
166 if (idx < 0) { 159 if (idx < 0) {
@@ -483,22 +476,28 @@ int snd_card_free(struct snd_card *card)
483 476
484EXPORT_SYMBOL(snd_card_free); 477EXPORT_SYMBOL(snd_card_free);
485 478
486static void choose_default_id(struct snd_card *card) 479static void snd_card_set_id_no_lock(struct snd_card *card, const char *nid)
487{ 480{
488 int i, len, idx_flag = 0, loops = SNDRV_CARDS; 481 int i, len, idx_flag = 0, loops = SNDRV_CARDS;
489 char *id, *spos; 482 const char *spos, *src;
483 char *id;
490 484
491 id = spos = card->shortname; 485 if (nid == NULL) {
492 while (*id != '\0') { 486 id = card->shortname;
493 if (*id == ' ') 487 spos = src = id;
494 spos = id + 1; 488 while (*id != '\0') {
495 id++; 489 if (*id == ' ')
490 spos = id + 1;
491 id++;
492 }
493 } else {
494 spos = src = nid;
496 } 495 }
497 id = card->id; 496 id = card->id;
498 while (*spos != '\0' && !isalnum(*spos)) 497 while (*spos != '\0' && !isalnum(*spos))
499 spos++; 498 spos++;
500 if (isdigit(*spos)) 499 if (isdigit(*spos))
501 *id++ = isalpha(card->shortname[0]) ? card->shortname[0] : 'D'; 500 *id++ = isalpha(src[0]) ? src[0] : 'D';
502 while (*spos != '\0' && (size_t)(id - card->id) < sizeof(card->id) - 1) { 501 while (*spos != '\0' && (size_t)(id - card->id) < sizeof(card->id) - 1) {
503 if (isalnum(*spos)) 502 if (isalnum(*spos))
504 *id++ = *spos; 503 *id++ = *spos;
@@ -513,7 +512,7 @@ static void choose_default_id(struct snd_card *card)
513 512
514 while (1) { 513 while (1) {
515 if (loops-- == 0) { 514 if (loops-- == 0) {
516 snd_printk(KERN_ERR "unable to choose default card id (%s)\n", id); 515 snd_printk(KERN_ERR "unable to set card id (%s)\n", id);
517 strcpy(card->id, card->proc_root->name); 516 strcpy(card->id, card->proc_root->name);
518 return; 517 return;
519 } 518 }
@@ -539,14 +538,33 @@ static void choose_default_id(struct snd_card *card)
539 spos = id + len - 2; 538 spos = id + len - 2;
540 if ((size_t)len <= sizeof(card->id) - 2) 539 if ((size_t)len <= sizeof(card->id) - 2)
541 spos++; 540 spos++;
542 *spos++ = '_'; 541 *(char *)spos++ = '_';
543 *spos++ = '1'; 542 *(char *)spos++ = '1';
544 *spos++ = '\0'; 543 *(char *)spos++ = '\0';
545 idx_flag++; 544 idx_flag++;
546 } 545 }
547 } 546 }
548} 547}
549 548
549/**
550 * snd_card_set_id - set card identification name
551 * @card: soundcard structure
552 * @nid: new identification string
553 *
554 * This function sets the card identification and checks for name
555 * collisions.
556 */
557void snd_card_set_id(struct snd_card *card, const char *nid)
558{
559 /* check if user specified own card->id */
560 if (card->id[0] != '\0')
561 return;
562 mutex_lock(&snd_card_mutex);
563 snd_card_set_id_no_lock(card, nid);
564 mutex_unlock(&snd_card_mutex);
565}
566EXPORT_SYMBOL(snd_card_set_id);
567
550#ifndef CONFIG_SYSFS_DEPRECATED 568#ifndef CONFIG_SYSFS_DEPRECATED
551static ssize_t 569static ssize_t
552card_id_show_attr(struct device *dev, 570card_id_show_attr(struct device *dev,
@@ -640,8 +658,7 @@ int snd_card_register(struct snd_card *card)
640 mutex_unlock(&snd_card_mutex); 658 mutex_unlock(&snd_card_mutex);
641 return 0; 659 return 0;
642 } 660 }
643 if (card->id[0] == '\0') 661 snd_card_set_id_no_lock(card, card->id[0] == '\0' ? NULL : card->id);
644 choose_default_id(card);
645 snd_cards[card->number] = card; 662 snd_cards[card->number] = card;
646 mutex_unlock(&snd_card_mutex); 663 mutex_unlock(&snd_card_mutex);
647 init_info_for_card(card); 664 init_info_for_card(card);
diff --git a/sound/core/jack.c b/sound/core/jack.c
index d54d1a05fe65..f705eec7372a 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -63,7 +63,7 @@ static int snd_jack_dev_register(struct snd_device *device)
63 63
64 /* Default to the sound card device. */ 64 /* Default to the sound card device. */
65 if (!jack->input_dev->dev.parent) 65 if (!jack->input_dev->dev.parent)
66 jack->input_dev->dev.parent = card->dev; 66 jack->input_dev->dev.parent = snd_card_get_device_link(card);
67 67
68 err = input_register_device(jack->input_dev); 68 err = input_register_device(jack->input_dev);
69 if (err == 0) 69 if (err == 0)
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index dda000b9684c..dbe406b82591 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -31,6 +31,7 @@
31#include <linux/time.h> 31#include <linux/time.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include <linux/math64.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <sound/core.h> 36#include <sound/core.h>
36#include <sound/minors.h> 37#include <sound/minors.h>
@@ -617,9 +618,7 @@ static long snd_pcm_oss_bytes(struct snd_pcm_substream *substream, long frames)
617#else 618#else
618 { 619 {
619 u64 bsize = (u64)runtime->oss.buffer_bytes * (u64)bytes; 620 u64 bsize = (u64)runtime->oss.buffer_bytes * (u64)bytes;
620 u32 rem; 621 return div_u64(bsize, buffer_size);
621 div64_32(&bsize, buffer_size, &rem);
622 return (long)bsize;
623 } 622 }
624#endif 623#endif
625} 624}
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index d659995ac3ac..333e4dd29450 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/time.h> 24#include <linux/time.h>
25#include <linux/math64.h>
25#include <sound/core.h> 26#include <sound/core.h>
26#include <sound/control.h> 27#include <sound/control.h>
27#include <sound/info.h> 28#include <sound/info.h>
@@ -126,24 +127,37 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
126} 127}
127 128
128#ifdef CONFIG_SND_PCM_XRUN_DEBUG 129#ifdef CONFIG_SND_PCM_XRUN_DEBUG
129#define xrun_debug(substream) ((substream)->pstr->xrun_debug) 130#define xrun_debug(substream, mask) ((substream)->pstr->xrun_debug & (mask))
130#else 131#else
131#define xrun_debug(substream) 0 132#define xrun_debug(substream, mask) 0
132#endif 133#endif
133 134
134#define dump_stack_on_xrun(substream) do { \ 135#define dump_stack_on_xrun(substream) do { \
135 if (xrun_debug(substream) > 1) \ 136 if (xrun_debug(substream, 2)) \
136 dump_stack(); \ 137 dump_stack(); \
137 } while (0) 138 } while (0)
138 139
140static void pcm_debug_name(struct snd_pcm_substream *substream,
141 char *name, size_t len)
142{
143 snprintf(name, len, "pcmC%dD%d%c:%d",
144 substream->pcm->card->number,
145 substream->pcm->device,
146 substream->stream ? 'c' : 'p',
147 substream->number);
148}
149
139static void xrun(struct snd_pcm_substream *substream) 150static void xrun(struct snd_pcm_substream *substream)
140{ 151{
152 struct snd_pcm_runtime *runtime = substream->runtime;
153
154 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
155 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
141 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); 156 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
142 if (xrun_debug(substream)) { 157 if (xrun_debug(substream, 1)) {
143 snd_printd(KERN_DEBUG "XRUN: pcmC%dD%d%c\n", 158 char name[16];
144 substream->pcm->card->number, 159 pcm_debug_name(substream, name, sizeof(name));
145 substream->pcm->device, 160 snd_printd(KERN_DEBUG "XRUN: %s\n", name);
146 substream->stream ? 'c' : 'p');
147 dump_stack_on_xrun(substream); 161 dump_stack_on_xrun(substream);
148 } 162 }
149} 163}
@@ -154,16 +168,16 @@ snd_pcm_update_hw_ptr_pos(struct snd_pcm_substream *substream,
154{ 168{
155 snd_pcm_uframes_t pos; 169 snd_pcm_uframes_t pos;
156 170
157 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
158 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
159 pos = substream->ops->pointer(substream); 171 pos = substream->ops->pointer(substream);
160 if (pos == SNDRV_PCM_POS_XRUN) 172 if (pos == SNDRV_PCM_POS_XRUN)
161 return pos; /* XRUN */ 173 return pos; /* XRUN */
162 if (pos >= runtime->buffer_size) { 174 if (pos >= runtime->buffer_size) {
163 if (printk_ratelimit()) { 175 if (printk_ratelimit()) {
164 snd_printd(KERN_ERR "BUG: stream = %i, pos = 0x%lx, " 176 char name[16];
177 pcm_debug_name(substream, name, sizeof(name));
178 snd_printd(KERN_ERR "BUG: %s, pos = 0x%lx, "
165 "buffer size = 0x%lx, period size = 0x%lx\n", 179 "buffer size = 0x%lx, period size = 0x%lx\n",
166 substream->stream, pos, runtime->buffer_size, 180 name, pos, runtime->buffer_size,
167 runtime->period_size); 181 runtime->period_size);
168 } 182 }
169 pos = 0; 183 pos = 0;
@@ -197,7 +211,7 @@ static int snd_pcm_update_hw_ptr_post(struct snd_pcm_substream *substream,
197 211
198#define hw_ptr_error(substream, fmt, args...) \ 212#define hw_ptr_error(substream, fmt, args...) \
199 do { \ 213 do { \
200 if (xrun_debug(substream)) { \ 214 if (xrun_debug(substream, 1)) { \
201 if (printk_ratelimit()) { \ 215 if (printk_ratelimit()) { \
202 snd_printd("PCM: " fmt, ##args); \ 216 snd_printd("PCM: " fmt, ##args); \
203 } \ 217 } \
@@ -251,7 +265,7 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
251 } 265 }
252 266
253 /* Do jiffies check only in xrun_debug mode */ 267 /* Do jiffies check only in xrun_debug mode */
254 if (!xrun_debug(substream)) 268 if (!xrun_debug(substream, 4))
255 goto no_jiffies_check; 269 goto no_jiffies_check;
256 270
257 /* Skip the jiffies check for hardwares with BATCH flag. 271 /* Skip the jiffies check for hardwares with BATCH flag.
@@ -261,6 +275,9 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
261 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH) 275 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
262 goto no_jiffies_check; 276 goto no_jiffies_check;
263 hdelta = new_hw_ptr - old_hw_ptr; 277 hdelta = new_hw_ptr - old_hw_ptr;
278 if (hdelta < runtime->delay)
279 goto no_jiffies_check;
280 hdelta -= runtime->delay;
264 jdelta = jiffies - runtime->hw_ptr_jiffies; 281 jdelta = jiffies - runtime->hw_ptr_jiffies;
265 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) { 282 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
266 delta = jdelta / 283 delta = jdelta /
@@ -294,14 +311,20 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
294 hw_ptr_interrupt = 311 hw_ptr_interrupt =
295 new_hw_ptr - new_hw_ptr % runtime->period_size; 312 new_hw_ptr - new_hw_ptr % runtime->period_size;
296 } 313 }
314 runtime->hw_ptr_interrupt = hw_ptr_interrupt;
315
297 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 316 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
298 runtime->silence_size > 0) 317 runtime->silence_size > 0)
299 snd_pcm_playback_silence(substream, new_hw_ptr); 318 snd_pcm_playback_silence(substream, new_hw_ptr);
300 319
320 if (runtime->status->hw_ptr == new_hw_ptr)
321 return 0;
322
301 runtime->hw_ptr_base = hw_base; 323 runtime->hw_ptr_base = hw_base;
302 runtime->status->hw_ptr = new_hw_ptr; 324 runtime->status->hw_ptr = new_hw_ptr;
303 runtime->hw_ptr_jiffies = jiffies; 325 runtime->hw_ptr_jiffies = jiffies;
304 runtime->hw_ptr_interrupt = hw_ptr_interrupt; 326 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
327 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
305 328
306 return snd_pcm_update_hw_ptr_post(substream, runtime); 329 return snd_pcm_update_hw_ptr_post(substream, runtime);
307} 330}
@@ -342,8 +365,12 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
342 new_hw_ptr = hw_base + pos; 365 new_hw_ptr = hw_base + pos;
343 } 366 }
344 /* Do jiffies check only in xrun_debug mode */ 367 /* Do jiffies check only in xrun_debug mode */
345 if (xrun_debug(substream) && 368 if (!xrun_debug(substream, 4))
346 ((delta * HZ) / runtime->rate) > jdelta + HZ/100) { 369 goto no_jiffies_check;
370 if (delta < runtime->delay)
371 goto no_jiffies_check;
372 delta -= runtime->delay;
373 if (((delta * HZ) / runtime->rate) > jdelta + HZ/100) {
347 hw_ptr_error(substream, 374 hw_ptr_error(substream,
348 "hw_ptr skipping! " 375 "hw_ptr skipping! "
349 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n", 376 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n",
@@ -352,13 +379,19 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
352 ((delta * HZ) / runtime->rate)); 379 ((delta * HZ) / runtime->rate));
353 return 0; 380 return 0;
354 } 381 }
382 no_jiffies_check:
355 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 383 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
356 runtime->silence_size > 0) 384 runtime->silence_size > 0)
357 snd_pcm_playback_silence(substream, new_hw_ptr); 385 snd_pcm_playback_silence(substream, new_hw_ptr);
358 386
387 if (runtime->status->hw_ptr == new_hw_ptr)
388 return 0;
389
359 runtime->hw_ptr_base = hw_base; 390 runtime->hw_ptr_base = hw_base;
360 runtime->status->hw_ptr = new_hw_ptr; 391 runtime->status->hw_ptr = new_hw_ptr;
361 runtime->hw_ptr_jiffies = jiffies; 392 runtime->hw_ptr_jiffies = jiffies;
393 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
394 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
362 395
363 return snd_pcm_update_hw_ptr_post(substream, runtime); 396 return snd_pcm_update_hw_ptr_post(substream, runtime);
364} 397}
@@ -452,7 +485,7 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
452 *r = 0; 485 *r = 0;
453 return UINT_MAX; 486 return UINT_MAX;
454 } 487 }
455 div64_32(&n, c, r); 488 n = div_u64_rem(n, c, r);
456 if (n >= UINT_MAX) { 489 if (n >= UINT_MAX) {
457 *r = 0; 490 *r = 0;
458 return UINT_MAX; 491 return UINT_MAX;
@@ -1524,6 +1557,23 @@ static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1524 return 0; 1557 return 0;
1525} 1558}
1526 1559
1560static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1561 void *arg)
1562{
1563 struct snd_pcm_hw_params *params = arg;
1564 snd_pcm_format_t format;
1565 int channels, width;
1566
1567 params->fifo_size = substream->runtime->hw.fifo_size;
1568 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1569 format = params_format(params);
1570 channels = params_channels(params);
1571 width = snd_pcm_format_physical_width(format);
1572 params->fifo_size /= width * channels;
1573 }
1574 return 0;
1575}
1576
1527/** 1577/**
1528 * snd_pcm_lib_ioctl - a generic PCM ioctl callback 1578 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1529 * @substream: the pcm substream instance 1579 * @substream: the pcm substream instance
@@ -1545,6 +1595,8 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1545 return snd_pcm_lib_ioctl_reset(substream, arg); 1595 return snd_pcm_lib_ioctl_reset(substream, arg);
1546 case SNDRV_PCM_IOCTL1_CHANNEL_INFO: 1596 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1547 return snd_pcm_lib_ioctl_channel_info(substream, arg); 1597 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1598 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1599 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1548 } 1600 }
1549 return -ENXIO; 1601 return -ENXIO;
1550} 1602}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index b5da656d1ece..84da3ba17c86 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -312,9 +312,18 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
312 312
313 hw = &substream->runtime->hw; 313 hw = &substream->runtime->hw;
314 if (!params->info) 314 if (!params->info)
315 params->info = hw->info; 315 params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
316 if (!params->fifo_size) 316 if (!params->fifo_size) {
317 params->fifo_size = hw->fifo_size; 317 if (snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) ==
318 snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) &&
319 snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS]) ==
320 snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS])) {
321 changed = substream->ops->ioctl(substream,
322 SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
323 if (params < 0)
324 return changed;
325 }
326 }
318 params->rmask = 0; 327 params->rmask = 0;
319 return 0; 328 return 0;
320} 329}
@@ -587,14 +596,15 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
587 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 596 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
588 status->avail = snd_pcm_playback_avail(runtime); 597 status->avail = snd_pcm_playback_avail(runtime);
589 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING || 598 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
590 runtime->status->state == SNDRV_PCM_STATE_DRAINING) 599 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
591 status->delay = runtime->buffer_size - status->avail; 600 status->delay = runtime->buffer_size - status->avail;
592 else 601 status->delay += runtime->delay;
602 } else
593 status->delay = 0; 603 status->delay = 0;
594 } else { 604 } else {
595 status->avail = snd_pcm_capture_avail(runtime); 605 status->avail = snd_pcm_capture_avail(runtime);
596 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) 606 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
597 status->delay = status->avail; 607 status->delay = status->avail + runtime->delay;
598 else 608 else
599 status->delay = 0; 609 status->delay = 0;
600 } 610 }
@@ -2410,6 +2420,7 @@ static int snd_pcm_delay(struct snd_pcm_substream *substream,
2410 n = snd_pcm_playback_hw_avail(runtime); 2420 n = snd_pcm_playback_hw_avail(runtime);
2411 else 2421 else
2412 n = snd_pcm_capture_avail(runtime); 2422 n = snd_pcm_capture_avail(runtime);
2423 n += runtime->delay;
2413 break; 2424 break;
2414 case SNDRV_PCM_STATE_XRUN: 2425 case SNDRV_PCM_STATE_XRUN:
2415 err = -EPIPE; 2426 err = -EPIPE;
diff --git a/sound/core/seq/Kconfig b/sound/core/seq/Kconfig
new file mode 100644
index 000000000000..b851fd890a89
--- /dev/null
+++ b/sound/core/seq/Kconfig
@@ -0,0 +1,16 @@
1# define SND_XXX_SEQ to min(SND_SEQUENCER,SND_XXX)
2
3config SND_RAWMIDI_SEQ
4 def_tristate SND_SEQUENCER && SND_RAWMIDI
5
6config SND_OPL3_LIB_SEQ
7 def_tristate SND_SEQUENCER && SND_OPL3_LIB
8
9config SND_OPL4_LIB_SEQ
10 def_tristate SND_SEQUENCER && SND_OPL4_LIB
11
12config SND_SBAWE_SEQ
13 def_tristate SND_SEQUENCER && SND_SBAWE
14
15config SND_EMU10K1_SEQ
16 def_tristate SND_SEQUENCER && SND_EMU10K1
diff --git a/sound/core/seq/Makefile b/sound/core/seq/Makefile
index 069593717fba..1bcb360330e5 100644
--- a/sound/core/seq/Makefile
+++ b/sound/core/seq/Makefile
@@ -17,14 +17,6 @@ snd-seq-midi-event-objs := seq_midi_event.o
17snd-seq-dummy-objs := seq_dummy.o 17snd-seq-dummy-objs := seq_dummy.o
18snd-seq-virmidi-objs := seq_virmidi.o 18snd-seq-virmidi-objs := seq_virmidi.o
19 19
20#
21# this function returns:
22# "m" - CONFIG_SND_SEQUENCER is m
23# <empty string> - CONFIG_SND_SEQUENCER is undefined
24# otherwise parameter #1 value
25#
26sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
27
28obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o snd-seq-device.o 20obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o snd-seq-device.o
29ifeq ($(CONFIG_SND_SEQUENCER_OSS),y) 21ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
30obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o 22obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
@@ -33,8 +25,8 @@ obj-$(CONFIG_SND_SEQ_DUMMY) += snd-seq-dummy.o
33 25
34# Toplevel Module Dependency 26# Toplevel Module Dependency
35obj-$(CONFIG_SND_VIRMIDI) += snd-seq-virmidi.o snd-seq-midi-event.o 27obj-$(CONFIG_SND_VIRMIDI) += snd-seq-virmidi.o snd-seq-midi-event.o
36obj-$(call sequencer,$(CONFIG_SND_RAWMIDI)) += snd-seq-midi.o snd-seq-midi-event.o 28obj-$(CONFIG_SND_RAWMIDI_SEQ) += snd-seq-midi.o snd-seq-midi-event.o
37obj-$(call sequencer,$(CONFIG_SND_OPL3_LIB)) += snd-seq-midi-event.o snd-seq-midi-emul.o 29obj-$(CONFIG_SND_OPL3_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
38obj-$(call sequencer,$(CONFIG_SND_OPL4_LIB)) += snd-seq-midi-event.o snd-seq-midi-emul.o 30obj-$(CONFIG_SND_OPL4_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
39obj-$(call sequencer,$(CONFIG_SND_SBAWE)) += snd-seq-midi-emul.o snd-seq-virmidi.o 31obj-$(CONFIG_SND_SBAWE_SEQ) += snd-seq-midi-emul.o snd-seq-virmidi.o
40obj-$(call sequencer,$(CONFIG_SND_EMU10K1)) += snd-seq-midi-emul.o snd-seq-virmidi.o 32obj-$(CONFIG_SND_EMU10K1_SEQ) += snd-seq-midi-emul.o snd-seq-virmidi.o
diff --git a/sound/drivers/opl3/Makefile b/sound/drivers/opl3/Makefile
index 19767a6a5c54..7f2c2a10c4e5 100644
--- a/sound/drivers/opl3/Makefile
+++ b/sound/drivers/opl3/Makefile
@@ -7,14 +7,6 @@ snd-opl3-lib-objs := opl3_lib.o opl3_synth.o
7snd-opl3-synth-y := opl3_seq.o opl3_midi.o opl3_drums.o 7snd-opl3-synth-y := opl3_seq.o opl3_midi.o opl3_drums.o
8snd-opl3-synth-$(CONFIG_SND_SEQUENCER_OSS) += opl3_oss.o 8snd-opl3-synth-$(CONFIG_SND_SEQUENCER_OSS) += opl3_oss.o
9 9
10#
11# this function returns:
12# "m" - CONFIG_SND_SEQUENCER is m
13# <empty string> - CONFIG_SND_SEQUENCER is undefined
14# otherwise parameter #1 value
15#
16sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
17
18obj-$(CONFIG_SND_OPL3_LIB) += snd-opl3-lib.o 10obj-$(CONFIG_SND_OPL3_LIB) += snd-opl3-lib.o
19obj-$(CONFIG_SND_OPL4_LIB) += snd-opl3-lib.o 11obj-$(CONFIG_SND_OPL4_LIB) += snd-opl3-lib.o
20obj-$(call sequencer,$(CONFIG_SND_OPL3_LIB)) += snd-opl3-synth.o 12obj-$(CONFIG_SND_OPL3_LIB_SEQ) += snd-opl3-synth.o
diff --git a/sound/drivers/opl4/Makefile b/sound/drivers/opl4/Makefile
index d178b39ffa60..b94009b0b19f 100644
--- a/sound/drivers/opl4/Makefile
+++ b/sound/drivers/opl4/Makefile
@@ -6,13 +6,5 @@
6snd-opl4-lib-objs := opl4_lib.o opl4_mixer.o opl4_proc.o 6snd-opl4-lib-objs := opl4_lib.o opl4_mixer.o opl4_proc.o
7snd-opl4-synth-objs := opl4_seq.o opl4_synth.o yrw801.o 7snd-opl4-synth-objs := opl4_seq.o opl4_synth.o yrw801.o
8 8
9#
10# this function returns:
11# "m" - CONFIG_SND_SEQUENCER is m
12# <empty string> - CONFIG_SND_SEQUENCER is undefined
13# otherwise parameter #1 value
14#
15sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
16
17obj-$(CONFIG_SND_OPL4_LIB) += snd-opl4-lib.o 9obj-$(CONFIG_SND_OPL4_LIB) += snd-opl4-lib.o
18obj-$(call sequencer,$(CONFIG_SND_OPL4_LIB)) += snd-opl4-synth.o 10obj-$(CONFIG_SND_OPL4_LIB_SEQ) += snd-opl4-synth.o
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index c6942a4de99b..51a7e3777e17 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -177,15 +177,18 @@ config SND_ES18XX
177 will be called snd-es18xx. 177 will be called snd-es18xx.
178 178
179config SND_SC6000 179config SND_SC6000
180 tristate "Gallant SC-6000, Audio Excel DSP 16" 180 tristate "Gallant SC-6000/6600/7000 and Audio Excel DSP 16"
181 depends on HAS_IOPORT 181 depends on HAS_IOPORT
182 select SND_WSS_LIB 182 select SND_WSS_LIB
183 select SND_OPL3_LIB 183 select SND_OPL3_LIB
184 select SND_MPU401_UART 184 select SND_MPU401_UART
185 help 185 help
186 Say Y here to include support for Gallant SC-6000 card and clones: 186 Say Y here to include support for Gallant SC-6000, SC-6600, SC-7000
187 cards and clones:
187 Audio Excel DSP 16 and Zoltrix AV302. 188 Audio Excel DSP 16 and Zoltrix AV302.
188 189
190 These cards are based on CompuMedia ASC-9308 or ASC-9408 chips.
191
189 To compile this driver as a module, choose M here: the module 192 To compile this driver as a module, choose M here: the module
190 will be called snd-sc6000. 193 will be called snd-sc6000.
191 194
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index 442b081cafb7..07df201ed8fa 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -193,7 +193,7 @@ static int __devexit snd_es1688_remove(struct device *dev, unsigned int n)
193static struct isa_driver snd_es1688_driver = { 193static struct isa_driver snd_es1688_driver = {
194 .match = snd_es1688_match, 194 .match = snd_es1688_match,
195 .probe = snd_es1688_probe, 195 .probe = snd_es1688_probe,
196 .remove = snd_es1688_remove, 196 .remove = __devexit_p(snd_es1688_remove),
197#if 0 /* FIXME */ 197#if 0 /* FIXME */
198 .suspend = snd_es1688_suspend, 198 .suspend = snd_es1688_suspend,
199 .resume = snd_es1688_resume, 199 .resume = snd_es1688_resume,
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index 180a8dea6bd9..65e4b18581a6 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -348,7 +348,7 @@ static int __devexit snd_gusextreme_remove(struct device *dev, unsigned int n)
348static struct isa_driver snd_gusextreme_driver = { 348static struct isa_driver snd_gusextreme_driver = {
349 .match = snd_gusextreme_match, 349 .match = snd_gusextreme_match,
350 .probe = snd_gusextreme_probe, 350 .probe = snd_gusextreme_probe,
351 .remove = snd_gusextreme_remove, 351 .remove = __devexit_p(snd_gusextreme_remove),
352#if 0 /* FIXME */ 352#if 0 /* FIXME */
353 .suspend = snd_gusextreme_suspend, 353 .suspend = snd_gusextreme_suspend,
354 .resume = snd_gusextreme_resume, 354 .resume = snd_gusextreme_resume,
diff --git a/sound/isa/sb/Makefile b/sound/isa/sb/Makefile
index 1098a56b2f4b..faeffceb01b7 100644
--- a/sound/isa/sb/Makefile
+++ b/sound/isa/sb/Makefile
@@ -13,14 +13,6 @@ snd-sbawe-objs := sbawe.o emu8000.o
13snd-emu8000-synth-objs := emu8000_synth.o emu8000_callback.o emu8000_patch.o emu8000_pcm.o 13snd-emu8000-synth-objs := emu8000_synth.o emu8000_callback.o emu8000_patch.o emu8000_pcm.o
14snd-es968-objs := es968.o 14snd-es968-objs := es968.o
15 15
16#
17# this function returns:
18# "m" - CONFIG_SND_SEQUENCER is m
19# <empty string> - CONFIG_SND_SEQUENCER is undefined
20# otherwise parameter #1 value
21#
22sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
23
24# Toplevel Module Dependency 16# Toplevel Module Dependency
25obj-$(CONFIG_SND_SB_COMMON) += snd-sb-common.o 17obj-$(CONFIG_SND_SB_COMMON) += snd-sb-common.o
26obj-$(CONFIG_SND_SB16_DSP) += snd-sb16-dsp.o 18obj-$(CONFIG_SND_SB16_DSP) += snd-sb16-dsp.o
@@ -33,4 +25,4 @@ ifeq ($(CONFIG_SND_SB16_CSP),y)
33 obj-$(CONFIG_SND_SB16) += snd-sb16-csp.o 25 obj-$(CONFIG_SND_SB16) += snd-sb16-csp.o
34 obj-$(CONFIG_SND_SBAWE) += snd-sb16-csp.o 26 obj-$(CONFIG_SND_SBAWE) += snd-sb16-csp.o
35endif 27endif
36obj-$(call sequencer,$(CONFIG_SND_SBAWE)) += snd-emu8000-synth.o 28obj-$(CONFIG_SND_SBAWE_SEQ) += snd-emu8000-synth.o
diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
index 782010608ef4..9a8bbf6dd62a 100644
--- a/sound/isa/sc6000.c
+++ b/sound/isa/sc6000.c
@@ -2,6 +2,8 @@
2 * Driver for Gallant SC-6000 soundcard. This card is also known as 2 * Driver for Gallant SC-6000 soundcard. This card is also known as
3 * Audio Excel DSP 16 or Zoltrix AV302. 3 * Audio Excel DSP 16 or Zoltrix AV302.
4 * These cards use CompuMedia ASC-9308 chip + AD1848 codec. 4 * These cards use CompuMedia ASC-9308 chip + AD1848 codec.
5 * SC-6600 and SC-7000 cards are also supported. They are based on
6 * CompuMedia ASC-9408 chip and CS4231 codec.
5 * 7 *
6 * Copyright (C) 2007 Krzysztof Helt <krzysztof.h1@wp.pl> 8 * Copyright (C) 2007 Krzysztof Helt <krzysztof.h1@wp.pl>
7 * 9 *
@@ -54,6 +56,7 @@ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
54 /* 0x300, 0x310, 0x320, 0x330 */ 56 /* 0x300, 0x310, 0x320, 0x330 */
55static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5, 7, 9, 10, 0 */ 57static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5, 7, 9, 10, 0 */
56static int dma[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0, 1, 3 */ 58static int dma[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0, 1, 3 */
59static bool joystick[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = false };
57 60
58module_param_array(index, int, NULL, 0444); 61module_param_array(index, int, NULL, 0444);
59MODULE_PARM_DESC(index, "Index value for sc-6000 based soundcard."); 62MODULE_PARM_DESC(index, "Index value for sc-6000 based soundcard.");
@@ -73,6 +76,8 @@ module_param_array(mpu_irq, int, NULL, 0444);
73MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for sc-6000 driver."); 76MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for sc-6000 driver.");
74module_param_array(dma, int, NULL, 0444); 77module_param_array(dma, int, NULL, 0444);
75MODULE_PARM_DESC(dma, "DMA # for sc-6000 driver."); 78MODULE_PARM_DESC(dma, "DMA # for sc-6000 driver.");
79module_param_array(joystick, bool, NULL, 0444);
80MODULE_PARM_DESC(joystick, "Enable gameport.");
76 81
77/* 82/*
78 * Commands of SC6000's DSP (SBPRO+special). 83 * Commands of SC6000's DSP (SBPRO+special).
@@ -191,7 +196,7 @@ static __devinit unsigned char sc6000_mpu_irq_to_softcfg(int mpu_irq)
191 return val; 196 return val;
192} 197}
193 198
194static __devinit int sc6000_wait_data(char __iomem *vport) 199static int sc6000_wait_data(char __iomem *vport)
195{ 200{
196 int loop = 1000; 201 int loop = 1000;
197 unsigned char val = 0; 202 unsigned char val = 0;
@@ -206,7 +211,7 @@ static __devinit int sc6000_wait_data(char __iomem *vport)
206 return -EAGAIN; 211 return -EAGAIN;
207} 212}
208 213
209static __devinit int sc6000_read(char __iomem *vport) 214static int sc6000_read(char __iomem *vport)
210{ 215{
211 if (sc6000_wait_data(vport)) 216 if (sc6000_wait_data(vport))
212 return -EBUSY; 217 return -EBUSY;
@@ -215,7 +220,7 @@ static __devinit int sc6000_read(char __iomem *vport)
215 220
216} 221}
217 222
218static __devinit int sc6000_write(char __iomem *vport, int cmd) 223static int sc6000_write(char __iomem *vport, int cmd)
219{ 224{
220 unsigned char val; 225 unsigned char val;
221 int loop = 500000; 226 int loop = 500000;
@@ -276,8 +281,33 @@ static int __devinit sc6000_dsp_reset(char __iomem *vport)
276} 281}
277 282
278/* detection and initialization */ 283/* detection and initialization */
279static int __devinit sc6000_cfg_write(char __iomem *vport, 284static int __devinit sc6000_hw_cfg_write(char __iomem *vport, const int *cfg)
280 unsigned char softcfg) 285{
286 if (sc6000_write(vport, COMMAND_6C) < 0) {
287 snd_printk(KERN_WARNING "CMD 0x%x: failed!\n", COMMAND_6C);
288 return -EIO;
289 }
290 if (sc6000_write(vport, COMMAND_5C) < 0) {
291 snd_printk(KERN_ERR "CMD 0x%x: failed!\n", COMMAND_5C);
292 return -EIO;
293 }
294 if (sc6000_write(vport, cfg[0]) < 0) {
295 snd_printk(KERN_ERR "DATA 0x%x: failed!\n", cfg[0]);
296 return -EIO;
297 }
298 if (sc6000_write(vport, cfg[1]) < 0) {
299 snd_printk(KERN_ERR "DATA 0x%x: failed!\n", cfg[1]);
300 return -EIO;
301 }
302 if (sc6000_write(vport, COMMAND_C5) < 0) {
303 snd_printk(KERN_ERR "CMD 0x%x: failed!\n", COMMAND_C5);
304 return -EIO;
305 }
306
307 return 0;
308}
309
310static int sc6000_cfg_write(char __iomem *vport, unsigned char softcfg)
281{ 311{
282 312
283 if (sc6000_write(vport, WRITE_MDIRQ_CFG)) { 313 if (sc6000_write(vport, WRITE_MDIRQ_CFG)) {
@@ -291,7 +321,7 @@ static int __devinit sc6000_cfg_write(char __iomem *vport,
291 return 0; 321 return 0;
292} 322}
293 323
294static int __devinit sc6000_setup_board(char __iomem *vport, int config) 324static int sc6000_setup_board(char __iomem *vport, int config)
295{ 325{
296 int loop = 10; 326 int loop = 10;
297 327
@@ -334,16 +364,39 @@ static int __devinit sc6000_init_mss(char __iomem *vport, int config,
334 return 0; 364 return 0;
335} 365}
336 366
337static int __devinit sc6000_init_board(char __iomem *vport, int irq, int dma, 367static void __devinit sc6000_hw_cfg_encode(char __iomem *vport, int *cfg,
338 char __iomem *vmss_port, int mpu_irq) 368 long xport, long xmpu,
369 long xmss_port, int joystick)
370{
371 cfg[0] = 0;
372 cfg[1] = 0;
373 if (xport == 0x240)
374 cfg[0] |= 1;
375 if (xmpu != SNDRV_AUTO_PORT) {
376 cfg[0] |= (xmpu & 0x30) >> 2;
377 cfg[1] |= 0x20;
378 }
379 if (xmss_port == 0xe80)
380 cfg[0] |= 0x10;
381 cfg[0] |= 0x40; /* always set */
382 if (!joystick)
383 cfg[0] |= 0x02;
384 cfg[1] |= 0x80; /* enable WSS system */
385 cfg[1] &= ~0x40; /* disable IDE */
386 snd_printd("hw cfg %x, %x\n", cfg[0], cfg[1]);
387}
388
389static int __devinit sc6000_init_board(char __iomem *vport,
390 char __iomem *vmss_port, int dev)
339{ 391{
340 char answer[15]; 392 char answer[15];
341 char version[2]; 393 char version[2];
342 int mss_config = sc6000_irq_to_softcfg(irq) | 394 int mss_config = sc6000_irq_to_softcfg(irq[dev]) |
343 sc6000_dma_to_softcfg(dma); 395 sc6000_dma_to_softcfg(dma[dev]);
344 int config = mss_config | 396 int config = mss_config |
345 sc6000_mpu_irq_to_softcfg(mpu_irq); 397 sc6000_mpu_irq_to_softcfg(mpu_irq[dev]);
346 int err; 398 int err;
399 int old = 0;
347 400
348 err = sc6000_dsp_reset(vport); 401 err = sc6000_dsp_reset(vport);
349 if (err < 0) { 402 if (err < 0) {
@@ -360,7 +413,6 @@ static int __devinit sc6000_init_board(char __iomem *vport, int irq, int dma,
360 /* 413 /*
361 * My SC-6000 card return "SC-6000" in DSPCopyright, so 414 * My SC-6000 card return "SC-6000" in DSPCopyright, so
362 * if we have something different, we have to be warned. 415 * if we have something different, we have to be warned.
363 * Mine returns "SC-6000A " - KH
364 */ 416 */
365 if (strncmp("SC-6000", answer, 7)) 417 if (strncmp("SC-6000", answer, 7))
366 snd_printk(KERN_WARNING "Warning: non SC-6000 audio card!\n"); 418 snd_printk(KERN_WARNING "Warning: non SC-6000 audio card!\n");
@@ -372,13 +424,32 @@ static int __devinit sc6000_init_board(char __iomem *vport, int irq, int dma,
372 printk(KERN_INFO PFX "Detected model: %s, DSP version %d.%d\n", 424 printk(KERN_INFO PFX "Detected model: %s, DSP version %d.%d\n",
373 answer, version[0], version[1]); 425 answer, version[0], version[1]);
374 426
375 /* 427 /* set configuration */
376 * 0x0A == (IRQ 7, DMA 1, MIRQ 0) 428 sc6000_write(vport, COMMAND_5C);
377 */ 429 if (sc6000_read(vport) < 0)
378 err = sc6000_cfg_write(vport, 0x0a); 430 old = 1;
431
432 if (!old) {
433 int cfg[2];
434 sc6000_hw_cfg_encode(vport, &cfg[0], port[dev], mpu_port[dev],
435 mss_port[dev], joystick[dev]);
436 if (sc6000_hw_cfg_write(vport, cfg) < 0) {
437 snd_printk(KERN_ERR "sc6000_hw_cfg_write: failed!\n");
438 return -EIO;
439 }
440 }
441 err = sc6000_setup_board(vport, config);
379 if (err < 0) { 442 if (err < 0) {
380 snd_printk(KERN_ERR "sc6000_cfg_write: failed!\n"); 443 snd_printk(KERN_ERR "sc6000_setup_board: failed!\n");
381 return -EFAULT; 444 return -ENODEV;
445 }
446
447 sc6000_dsp_reset(vport);
448
449 if (!old) {
450 sc6000_write(vport, COMMAND_60);
451 sc6000_write(vport, 0x02);
452 sc6000_dsp_reset(vport);
382 } 453 }
383 454
384 err = sc6000_setup_board(vport, config); 455 err = sc6000_setup_board(vport, config);
@@ -386,10 +457,9 @@ static int __devinit sc6000_init_board(char __iomem *vport, int irq, int dma,
386 snd_printk(KERN_ERR "sc6000_setup_board: failed!\n"); 457 snd_printk(KERN_ERR "sc6000_setup_board: failed!\n");
387 return -ENODEV; 458 return -ENODEV;
388 } 459 }
389
390 err = sc6000_init_mss(vport, config, vmss_port, mss_config); 460 err = sc6000_init_mss(vport, config, vmss_port, mss_config);
391 if (err < 0) { 461 if (err < 0) {
392 snd_printk(KERN_ERR "Can not initialize " 462 snd_printk(KERN_ERR "Cannot initialize "
393 "Microsoft Sound System mode.\n"); 463 "Microsoft Sound System mode.\n");
394 return -ENODEV; 464 return -ENODEV;
395 } 465 }
@@ -485,14 +555,16 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
485 struct snd_card *card; 555 struct snd_card *card;
486 struct snd_wss *chip; 556 struct snd_wss *chip;
487 struct snd_opl3 *opl3; 557 struct snd_opl3 *opl3;
488 char __iomem *vport; 558 char __iomem **vport;
489 char __iomem *vmss_port; 559 char __iomem *vmss_port;
490 560
491 561
492 err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); 562 err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(vport),
563 &card);
493 if (err < 0) 564 if (err < 0)
494 return err; 565 return err;
495 566
567 vport = card->private_data;
496 if (xirq == SNDRV_AUTO_IRQ) { 568 if (xirq == SNDRV_AUTO_IRQ) {
497 xirq = snd_legacy_find_free_irq(possible_irqs); 569 xirq = snd_legacy_find_free_irq(possible_irqs);
498 if (xirq < 0) { 570 if (xirq < 0) {
@@ -517,8 +589,8 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
517 err = -EBUSY; 589 err = -EBUSY;
518 goto err_exit; 590 goto err_exit;
519 } 591 }
520 vport = devm_ioport_map(devptr, port[dev], 0x10); 592 *vport = devm_ioport_map(devptr, port[dev], 0x10);
521 if (!vport) { 593 if (*vport == NULL) {
522 snd_printk(KERN_ERR PFX 594 snd_printk(KERN_ERR PFX
523 "I/O port cannot be iomaped.\n"); 595 "I/O port cannot be iomaped.\n");
524 err = -EBUSY; 596 err = -EBUSY;
@@ -533,7 +605,7 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
533 goto err_unmap1; 605 goto err_unmap1;
534 } 606 }
535 vmss_port = devm_ioport_map(devptr, mss_port[dev], 4); 607 vmss_port = devm_ioport_map(devptr, mss_port[dev], 4);
536 if (!vport) { 608 if (!vmss_port) {
537 snd_printk(KERN_ERR PFX 609 snd_printk(KERN_ERR PFX
538 "MSS port I/O cannot be iomaped.\n"); 610 "MSS port I/O cannot be iomaped.\n");
539 err = -EBUSY; 611 err = -EBUSY;
@@ -544,7 +616,7 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
544 port[dev], xirq, xdma, 616 port[dev], xirq, xdma,
545 mpu_irq[dev] == SNDRV_AUTO_IRQ ? 0 : mpu_irq[dev]); 617 mpu_irq[dev] == SNDRV_AUTO_IRQ ? 0 : mpu_irq[dev]);
546 618
547 err = sc6000_init_board(vport, xirq, xdma, vmss_port, mpu_irq[dev]); 619 err = sc6000_init_board(*vport, vmss_port, dev);
548 if (err < 0) 620 if (err < 0)
549 goto err_unmap2; 621 goto err_unmap2;
550 622
@@ -552,7 +624,6 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
552 WSS_HW_DETECT, 0, &chip); 624 WSS_HW_DETECT, 0, &chip);
553 if (err < 0) 625 if (err < 0)
554 goto err_unmap2; 626 goto err_unmap2;
555 card->private_data = chip;
556 627
557 err = snd_wss_pcm(chip, 0, NULL); 628 err = snd_wss_pcm(chip, 0, NULL);
558 if (err < 0) { 629 if (err < 0) {
@@ -608,6 +679,7 @@ static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
608 return 0; 679 return 0;
609 680
610err_unmap2: 681err_unmap2:
682 sc6000_setup_board(*vport, 0);
611 release_region(mss_port[dev], 4); 683 release_region(mss_port[dev], 4);
612err_unmap1: 684err_unmap1:
613 release_region(port[dev], 0x10); 685 release_region(port[dev], 0x10);
@@ -618,11 +690,17 @@ err_exit:
618 690
619static int __devexit snd_sc6000_remove(struct device *devptr, unsigned int dev) 691static int __devexit snd_sc6000_remove(struct device *devptr, unsigned int dev)
620{ 692{
693 struct snd_card *card = dev_get_drvdata(devptr);
694 char __iomem **vport = card->private_data;
695
696 if (sc6000_setup_board(*vport, 0) < 0)
697 snd_printk(KERN_WARNING "sc6000_setup_board failed on exit!\n");
698
621 release_region(port[dev], 0x10); 699 release_region(port[dev], 0x10);
622 release_region(mss_port[dev], 4); 700 release_region(mss_port[dev], 4);
623 701
624 snd_card_free(dev_get_drvdata(devptr));
625 dev_set_drvdata(devptr, NULL); 702 dev_set_drvdata(devptr, NULL);
703 snd_card_free(card);
626 return 0; 704 return 0;
627} 705}
628 706
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 66f3b48ceafc..e497525bc11b 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -619,8 +619,7 @@ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
619/* hw_free callback */ 619/* hw_free callback */
620static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream) 620static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream)
621{ 621{
622 if (substream->runtime->dma_area) 622 vfree(substream->runtime->dma_area);
623 vfree(substream->runtime->dma_area);
624 substream->runtime->dma_area = NULL; 623 substream->runtime->dma_area = NULL;
625 return 0; 624 return 0;
626} 625}
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index 6055fd6d3b38..e924492df21d 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -935,7 +935,7 @@ snd_harmony_create(struct snd_card *card,
935 h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE); 935 h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE);
936 if (h->iobase == NULL) { 936 if (h->iobase == NULL) {
937 printk(KERN_ERR PFX "unable to remap hpa 0x%lx\n", 937 printk(KERN_ERR PFX "unable to remap hpa 0x%lx\n",
938 padev->hpa.start); 938 (unsigned long)padev->hpa.start);
939 err = -EBUSY; 939 err = -EBUSY;
940 goto free_and_ret; 940 goto free_and_ret;
941 } 941 }
@@ -1020,7 +1020,7 @@ static struct parisc_driver snd_harmony_driver = {
1020 .name = "harmony", 1020 .name = "harmony",
1021 .id_table = snd_harmony_devtable, 1021 .id_table = snd_harmony_devtable,
1022 .probe = snd_harmony_probe, 1022 .probe = snd_harmony_probe,
1023 .remove = snd_harmony_remove, 1023 .remove = __devexit_p(snd_harmony_remove),
1024}; 1024};
1025 1025
1026static int __init 1026static int __init
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 93422e3a3f0c..748f6b7d90b7 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -275,6 +275,16 @@ config SND_CS5535AUDIO
275 To compile this driver as a module, choose M here: the module 275 To compile this driver as a module, choose M here: the module
276 will be called snd-cs5535audio. 276 will be called snd-cs5535audio.
277 277
278config SND_CTXFI
279 tristate "Creative Sound Blaster X-Fi"
280 select SND_PCM
281 help
282 If you want to use soundcards based on Creative Sound Blaster X-Fi
283 boards with 20k1 or 20k2 chips, say Y here.
284
285 To compile this driver as a module, choose M here: the module
286 will be called snd-ctxfi.
287
278config SND_DARLA20 288config SND_DARLA20
279 tristate "(Echoaudio) Darla20" 289 tristate "(Echoaudio) Darla20"
280 select FW_LOADER 290 select FW_LOADER
@@ -532,6 +542,9 @@ config SND_HDSP
532 To compile this driver as a module, choose M here: the module 542 To compile this driver as a module, choose M here: the module
533 will be called snd-hdsp. 543 will be called snd-hdsp.
534 544
545comment "Don't forget to add built-in firmwares for HDSP driver"
546 depends on SND_HDSP=y
547
535config SND_HDSPM 548config SND_HDSPM
536 tristate "RME Hammerfall DSP MADI" 549 tristate "RME Hammerfall DSP MADI"
537 select SND_HWDEP 550 select SND_HWDEP
@@ -622,6 +635,16 @@ config SND_KORG1212
622 To compile this driver as a module, choose M here: the module 635 To compile this driver as a module, choose M here: the module
623 will be called snd-korg1212. 636 will be called snd-korg1212.
624 637
638config SND_LX6464ES
639 tristate "Digigram LX6464ES"
640 select SND_PCM
641 help
642 Say Y here to include support for Digigram LX6464ES boards.
643
644 To compile this driver as a module, choose M here: the module
645 will be called snd-lx6464es.
646
647
625config SND_MAESTRO3 648config SND_MAESTRO3
626 tristate "ESS Allegro/Maestro3" 649 tristate "ESS Allegro/Maestro3"
627 select SND_AC97_CODEC 650 select SND_AC97_CODEC
@@ -764,8 +787,8 @@ config SND_VIRTUOSO
764 select SND_OXYGEN_LIB 787 select SND_OXYGEN_LIB
765 help 788 help
766 Say Y here to include support for sound cards based on the 789 Say Y here to include support for sound cards based on the
767 Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, and 790 Asus AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X,
768 Essence STX. 791 Essence ST (Deluxe), and Essence STX.
769 Support for the HDAV1.3 (Deluxe) is very experimental. 792 Support for the HDAV1.3 (Deluxe) is very experimental.
770 793
771 To compile this driver as a module, choose M here: the module 794 To compile this driver as a module, choose M here: the module
diff --git a/sound/pci/Makefile b/sound/pci/Makefile
index 65b25d221cd2..ecfc609d2b9f 100644
--- a/sound/pci/Makefile
+++ b/sound/pci/Makefile
@@ -59,9 +59,11 @@ obj-$(CONFIG_SND) += \
59 ali5451/ \ 59 ali5451/ \
60 au88x0/ \ 60 au88x0/ \
61 aw2/ \ 61 aw2/ \
62 ctxfi/ \
62 ca0106/ \ 63 ca0106/ \
63 cs46xx/ \ 64 cs46xx/ \
64 cs5535audio/ \ 65 cs5535audio/ \
66 lx6464es/ \
65 echoaudio/ \ 67 echoaudio/ \
66 emu10k1/ \ 68 emu10k1/ \
67 hda/ \ 69 hda/ \
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 3906f5afe27a..23f49f356e0f 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1255,8 +1255,8 @@ static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
1255 int temp; 1255 int temp;
1256 1256
1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); 1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
1258 temp = (dma->period_virt * dma->period_bytes) + (temp & POS_MASK); 1258 temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1));
1259 return (temp); 1259 return temp;
1260} 1260}
1261 1261
1262static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) 1262static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
@@ -1504,8 +1504,7 @@ static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
1504 int temp; 1504 int temp;
1505 1505
1506 temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)); 1506 temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2));
1507 //temp = (temp & POS_MASK) + (((temp>>WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK)*(dma->cfg0&POS_MASK)); 1507 temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1));
1508 temp = (temp & POS_MASK) + ((dma->period_virt) * (dma->period_bytes));
1509 return temp; 1508 return temp;
1510} 1509}
1511 1510
@@ -2441,7 +2440,8 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
2441 spin_lock(&vortex->lock); 2440 spin_lock(&vortex->lock);
2442 for (i = 0; i < NR_ADB; i++) { 2441 for (i = 0; i < NR_ADB; i++) {
2443 if (vortex->dma_adb[i].fifo_status == FIFO_START) { 2442 if (vortex->dma_adb[i].fifo_status == FIFO_START) {
2444 if (vortex_adbdma_bufshift(vortex, i)) ; 2443 if (!vortex_adbdma_bufshift(vortex, i))
2444 continue;
2445 spin_unlock(&vortex->lock); 2445 spin_unlock(&vortex->lock);
2446 snd_pcm_period_elapsed(vortex->dma_adb[i]. 2446 snd_pcm_period_elapsed(vortex->dma_adb[i].
2447 substream); 2447 substream);
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index ce3f2e90f4d7..24585c6c6d01 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -810,6 +810,8 @@ static struct pci_device_id snd_bt87x_ids[] = {
810 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC), 810 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x107d, 0x6606, GENERIC),
811 /* Voodoo TV 200 */ 811 /* Voodoo TV 200 */
812 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC), 812 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x121a, 0x3000, GENERIC),
813 /* Askey Computer Corp. MagicTView'99 */
814 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x144f, 0x3000, GENERIC),
813 /* AVerMedia Studio No. 103, 203, ...? */ 815 /* AVerMedia Studio No. 103, 203, ...? */
814 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1461, 0x0003, AVPHONE98), 816 BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, 0x1461, 0x0003, AVPHONE98),
815 /* Prolink PixelView PV-M4900 */ 817 /* Prolink PixelView PV-M4900 */
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index bfac30f7929f..57b992a5c057 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1319,7 +1319,6 @@ static int __devinit snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
1319 } 1319 }
1320 1320
1321 pcm->info_flags = 0; 1321 pcm->info_flags = 0;
1322 pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
1323 strcpy(pcm->name, "CA0106"); 1322 strcpy(pcm->name, "CA0106");
1324 1323
1325 for(substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; 1324 for(substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index c111efe61c3c..c8c6f437f5b3 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -739,7 +739,7 @@ static int __devinit rename_ctl(struct snd_card *card, const char *src, const ch
739 } while (0) 739 } while (0)
740 740
741static __devinitdata 741static __devinitdata
742DECLARE_TLV_DB_SCALE(snd_ca0106_master_db_scale, -6375, 50, 1); 742DECLARE_TLV_DB_SCALE(snd_ca0106_master_db_scale, -6375, 25, 1);
743 743
744static char *slave_vols[] __devinitdata = { 744static char *slave_vols[] __devinitdata = {
745 "Analog Front Playback Volume", 745 "Analog Front Playback Volume",
@@ -841,6 +841,9 @@ int __devinit snd_ca0106_mixer(struct snd_ca0106 *emu)
841 snd_ca0106_master_db_scale); 841 snd_ca0106_master_db_scale);
842 if (!vmaster) 842 if (!vmaster)
843 return -ENOMEM; 843 return -ENOMEM;
844 err = snd_ctl_add(card, vmaster);
845 if (err < 0)
846 return err;
844 add_slaves(card, vmaster, slave_vols); 847 add_slaves(card, vmaster, slave_vols);
845 848
846 if (emu->details->spi_dac == 1) { 849 if (emu->details->spi_dac == 1) {
@@ -848,8 +851,13 @@ int __devinit snd_ca0106_mixer(struct snd_ca0106 *emu)
848 NULL); 851 NULL);
849 if (!vmaster) 852 if (!vmaster)
850 return -ENOMEM; 853 return -ENOMEM;
854 err = snd_ctl_add(card, vmaster);
855 if (err < 0)
856 return err;
851 add_slaves(card, vmaster, slave_sws); 857 add_slaves(card, vmaster, slave_sws);
852 } 858 }
859
860 strcpy(card->mixername, "CA0106");
853 return 0; 861 return 0;
854} 862}
855 863
diff --git a/sound/pci/ctxfi/Makefile b/sound/pci/ctxfi/Makefile
new file mode 100644
index 000000000000..15075f89e98a
--- /dev/null
+++ b/sound/pci/ctxfi/Makefile
@@ -0,0 +1,5 @@
1snd-ctxfi-objs := xfi.o ctatc.o ctvmem.o ctpcm.o ctmixer.o ctresource.o \
2 ctsrc.o ctamixer.o ctdaio.o ctimap.o cthardware.o cttimer.o \
3 cthw20k2.o cthw20k1.o
4
5obj-$(CONFIG_SND_CTXFI) += snd-ctxfi.o
diff --git a/sound/pci/ctxfi/ct20k1reg.h b/sound/pci/ctxfi/ct20k1reg.h
new file mode 100644
index 000000000000..f2e34e3f27ee
--- /dev/null
+++ b/sound/pci/ctxfi/ct20k1reg.h
@@ -0,0 +1,636 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 */
8
9#ifndef CT20K1REG_H
10#define CT20k1REG_H
11
12/* 20k1 registers */
13#define DSPXRAM_START 0x000000
14#define DSPXRAM_END 0x013FFC
15#define DSPAXRAM_START 0x020000
16#define DSPAXRAM_END 0x023FFC
17#define DSPYRAM_START 0x040000
18#define DSPYRAM_END 0x04FFFC
19#define DSPAYRAM_START 0x020000
20#define DSPAYRAM_END 0x063FFC
21#define DSPMICRO_START 0x080000
22#define DSPMICRO_END 0x0B3FFC
23#define DSP0IO_START 0x100000
24#define DSP0IO_END 0x101FFC
25#define AUDIORINGIPDSP0_START 0x100000
26#define AUDIORINGIPDSP0_END 0x1003FC
27#define AUDIORINGOPDSP0_START 0x100400
28#define AUDIORINGOPDSP0_END 0x1007FC
29#define AUDPARARINGIODSP0_START 0x100800
30#define AUDPARARINGIODSP0_END 0x100BFC
31#define DSP0LOCALHWREG_START 0x100C00
32#define DSP0LOCALHWREG_END 0x100C3C
33#define DSP0XYRAMAGINDEX_START 0x100C40
34#define DSP0XYRAMAGINDEX_END 0x100C5C
35#define DSP0XYRAMAGMDFR_START 0x100C60
36#define DSP0XYRAMAGMDFR_END 0x100C7C
37#define DSP0INTCONTLVEC_START 0x100C80
38#define DSP0INTCONTLVEC_END 0x100CD8
39#define INTCONTLGLOBALREG_START 0x100D1C
40#define INTCONTLGLOBALREG_END 0x100D3C
41#define HOSTINTFPORTADDRCONTDSP0 0x100D40
42#define HOSTINTFPORTDATADSP0 0x100D44
43#define TIME0PERENBDSP0 0x100D60
44#define TIME0COUNTERDSP0 0x100D64
45#define TIME1PERENBDSP0 0x100D68
46#define TIME1COUNTERDSP0 0x100D6C
47#define TIME2PERENBDSP0 0x100D70
48#define TIME2COUNTERDSP0 0x100D74
49#define TIME3PERENBDSP0 0x100D78
50#define TIME3COUNTERDSP0 0x100D7C
51#define XRAMINDOPERREFNOUP_STARTDSP0 0x100D80
52#define XRAMINDOPERREFNOUP_ENDDSP0 0x100D9C
53#define XRAMINDOPERREFUP_STARTDSP0 0x100DA0
54#define XRAMINDOPERREFUP_ENDDSP0 0x100DBC
55#define YRAMINDOPERREFNOUP_STARTDSP0 0x100DC0
56#define YRAMINDOPERREFNOUP_ENDDSP0 0x100DDC
57#define YRAMINDOPERREFUP_STARTDSP0 0x100DE0
58#define YRAMINDOPERREFUP_ENDDSP0 0x100DFC
59#define DSP0CONDCODE 0x100E00
60#define DSP0STACKFLAG 0x100E04
61#define DSP0PROGCOUNTSTACKPTREG 0x100E08
62#define DSP0PROGCOUNTSTACKDATAREG 0x100E0C
63#define DSP0CURLOOPADDRREG 0x100E10
64#define DSP0CURLOOPCOUNT 0x100E14
65#define DSP0TOPLOOPCOUNTSTACK 0x100E18
66#define DSP0TOPLOOPADDRSTACK 0x100E1C
67#define DSP0LOOPSTACKPTR 0x100E20
68#define DSP0STASSTACKDATAREG 0x100E24
69#define DSP0STASSTACKPTR 0x100E28
70#define DSP0PROGCOUNT 0x100E2C
71#define GLOBDSPDEBGREG 0x100E30
72#define GLOBDSPBREPTRREG 0x100E30
73#define DSP0XYRAMBASE_START 0x100EA0
74#define DSP0XYRAMBASE_END 0x100EBC
75#define DSP0XYRAMLENG_START 0x100EC0
76#define DSP0XYRAMLENG_END 0x100EDC
77#define SEMAPHOREREGDSP0 0x100EE0
78#define DSP0INTCONTMASKREG 0x100EE4
79#define DSP0INTCONTPENDREG 0x100EE8
80#define DSP0INTCONTSERVINT 0x100EEC
81#define DSPINTCONTEXTINTMODREG 0x100EEC
82#define GPIODSP0 0x100EFC
83#define DMADSPBASEADDRREG_STARTDSP0 0x100F00
84#define DMADSPBASEADDRREG_ENDDSP0 0x100F1C
85#define DMAHOSTBASEADDRREG_STARTDSP0 0x100F20
86#define DMAHOSTBASEADDRREG_ENDDSP0 0x100F3C
87#define DMADSPCURADDRREG_STARTDSP0 0x100F40
88#define DMADSPCURADDRREG_ENDDSP0 0x100F5C
89#define DMAHOSTCURADDRREG_STARTDSP0 0x100F60
90#define DMAHOSTCURADDRREG_ENDDSP0 0x100F7C
91#define DMATANXCOUNTREG_STARTDSP0 0x100F80
92#define DMATANXCOUNTREG_ENDDSP0 0x100F9C
93#define DMATIMEBUGREG_STARTDSP0 0x100FA0
94#define DMATIMEBUGREG_ENDDSP0 0x100FAC
95#define DMACNTLMODFREG_STARTDSP0 0x100FA0
96#define DMACNTLMODFREG_ENDDSP0 0x100FAC
97
98#define DMAGLOBSTATSREGDSP0 0x100FEC
99#define DSP0XGPRAM_START 0x101000
100#define DSP0XGPRAM_END 0x1017FC
101#define DSP0YGPRAM_START 0x101800
102#define DSP0YGPRAM_END 0x101FFC
103
104
105
106
107#define AUDIORINGIPDSP1_START 0x102000
108#define AUDIORINGIPDSP1_END 0x1023FC
109#define AUDIORINGOPDSP1_START 0x102400
110#define AUDIORINGOPDSP1_END 0x1027FC
111#define AUDPARARINGIODSP1_START 0x102800
112#define AUDPARARINGIODSP1_END 0x102BFC
113#define DSP1LOCALHWREG_START 0x102C00
114#define DSP1LOCALHWREG_END 0x102C3C
115#define DSP1XYRAMAGINDEX_START 0x102C40
116#define DSP1XYRAMAGINDEX_END 0x102C5C
117#define DSP1XYRAMAGMDFR_START 0x102C60
118#define DSP1XYRAMAGMDFR_END 0x102C7C
119#define DSP1INTCONTLVEC_START 0x102C80
120#define DSP1INTCONTLVEC_END 0x102CD8
121#define HOSTINTFPORTADDRCONTDSP1 0x102D40
122#define HOSTINTFPORTDATADSP1 0x102D44
123#define TIME0PERENBDSP1 0x102D60
124#define TIME0COUNTERDSP1 0x102D64
125#define TIME1PERENBDSP1 0x102D68
126#define TIME1COUNTERDSP1 0x102D6C
127#define TIME2PERENBDSP1 0x102D70
128#define TIME2COUNTERDSP1 0x102D74
129#define TIME3PERENBDSP1 0x102D78
130#define TIME3COUNTERDSP1 0x102D7C
131#define XRAMINDOPERREFNOUP_STARTDSP1 0x102D80
132#define XRAMINDOPERREFNOUP_ENDDSP1 0x102D9C
133#define XRAMINDOPERREFUP_STARTDSP1 0x102DA0
134#define XRAMINDOPERREFUP_ENDDSP1 0x102DBC
135#define YRAMINDOPERREFNOUP_STARTDSP1 0x102DC0
136#define YRAMINDOPERREFNOUP_ENDDSP1 0x102DDC
137#define YRAMINDOPERREFUP_STARTDSP1 0x102DE0
138#define YRAMINDOPERREFUP_ENDDSP1 0x102DFC
139
140#define DSP1CONDCODE 0x102E00
141#define DSP1STACKFLAG 0x102E04
142#define DSP1PROGCOUNTSTACKPTREG 0x102E08
143#define DSP1PROGCOUNTSTACKDATAREG 0x102E0C
144#define DSP1CURLOOPADDRREG 0x102E10
145#define DSP1CURLOOPCOUNT 0x102E14
146#define DSP1TOPLOOPCOUNTSTACK 0x102E18
147#define DSP1TOPLOOPADDRSTACK 0x102E1C
148#define DSP1LOOPSTACKPTR 0x102E20
149#define DSP1STASSTACKDATAREG 0x102E24
150#define DSP1STASSTACKPTR 0x102E28
151#define DSP1PROGCOUNT 0x102E2C
152#define DSP1XYRAMBASE_START 0x102EA0
153#define DSP1XYRAMBASE_END 0x102EBC
154#define DSP1XYRAMLENG_START 0x102EC0
155#define DSP1XYRAMLENG_END 0x102EDC
156#define SEMAPHOREREGDSP1 0x102EE0
157#define DSP1INTCONTMASKREG 0x102EE4
158#define DSP1INTCONTPENDREG 0x102EE8
159#define DSP1INTCONTSERVINT 0x102EEC
160#define GPIODSP1 0x102EFC
161#define DMADSPBASEADDRREG_STARTDSP1 0x102F00
162#define DMADSPBASEADDRREG_ENDDSP1 0x102F1C
163#define DMAHOSTBASEADDRREG_STARTDSP1 0x102F20
164#define DMAHOSTBASEADDRREG_ENDDSP1 0x102F3C
165#define DMADSPCURADDRREG_STARTDSP1 0x102F40
166#define DMADSPCURADDRREG_ENDDSP1 0x102F5C
167#define DMAHOSTCURADDRREG_STARTDSP1 0x102F60
168#define DMAHOSTCURADDRREG_ENDDSP1 0x102F7C
169#define DMATANXCOUNTREG_STARTDSP1 0x102F80
170#define DMATANXCOUNTREG_ENDDSP1 0x102F9C
171#define DMATIMEBUGREG_STARTDSP1 0x102FA0
172#define DMATIMEBUGREG_ENDDSP1 0x102FAC
173#define DMACNTLMODFREG_STARTDSP1 0x102FA0
174#define DMACNTLMODFREG_ENDDSP1 0x102FAC
175
176#define DMAGLOBSTATSREGDSP1 0x102FEC
177#define DSP1XGPRAM_START 0x103000
178#define DSP1XGPRAM_END 0x1033FC
179#define DSP1YGPRAM_START 0x103400
180#define DSP1YGPRAM_END 0x1037FC
181
182
183
184#define AUDIORINGIPDSP2_START 0x104000
185#define AUDIORINGIPDSP2_END 0x1043FC
186#define AUDIORINGOPDSP2_START 0x104400
187#define AUDIORINGOPDSP2_END 0x1047FC
188#define AUDPARARINGIODSP2_START 0x104800
189#define AUDPARARINGIODSP2_END 0x104BFC
190#define DSP2LOCALHWREG_START 0x104C00
191#define DSP2LOCALHWREG_END 0x104C3C
192#define DSP2XYRAMAGINDEX_START 0x104C40
193#define DSP2XYRAMAGINDEX_END 0x104C5C
194#define DSP2XYRAMAGMDFR_START 0x104C60
195#define DSP2XYRAMAGMDFR_END 0x104C7C
196#define DSP2INTCONTLVEC_START 0x104C80
197#define DSP2INTCONTLVEC_END 0x104CD8
198#define HOSTINTFPORTADDRCONTDSP2 0x104D40
199#define HOSTINTFPORTDATADSP2 0x104D44
200#define TIME0PERENBDSP2 0x104D60
201#define TIME0COUNTERDSP2 0x104D64
202#define TIME1PERENBDSP2 0x104D68
203#define TIME1COUNTERDSP2 0x104D6C
204#define TIME2PERENBDSP2 0x104D70
205#define TIME2COUNTERDSP2 0x104D74
206#define TIME3PERENBDSP2 0x104D78
207#define TIME3COUNTERDSP2 0x104D7C
208#define XRAMINDOPERREFNOUP_STARTDSP2 0x104D80
209#define XRAMINDOPERREFNOUP_ENDDSP2 0x104D9C
210#define XRAMINDOPERREFUP_STARTDSP2 0x104DA0
211#define XRAMINDOPERREFUP_ENDDSP2 0x104DBC
212#define YRAMINDOPERREFNOUP_STARTDSP2 0x104DC0
213#define YRAMINDOPERREFNOUP_ENDDSP2 0x104DDC
214#define YRAMINDOPERREFUP_STARTDSP2 0x104DE0
215#define YRAMINDOPERREFUP_ENDDSP2 0x104DFC
216#define DSP2CONDCODE 0x104E00
217#define DSP2STACKFLAG 0x104E04
218#define DSP2PROGCOUNTSTACKPTREG 0x104E08
219#define DSP2PROGCOUNTSTACKDATAREG 0x104E0C
220#define DSP2CURLOOPADDRREG 0x104E10
221#define DSP2CURLOOPCOUNT 0x104E14
222#define DSP2TOPLOOPCOUNTSTACK 0x104E18
223#define DSP2TOPLOOPADDRSTACK 0x104E1C
224#define DSP2LOOPSTACKPTR 0x104E20
225#define DSP2STASSTACKDATAREG 0x104E24
226#define DSP2STASSTACKPTR 0x104E28
227#define DSP2PROGCOUNT 0x104E2C
228#define DSP2XYRAMBASE_START 0x104EA0
229#define DSP2XYRAMBASE_END 0x104EBC
230#define DSP2XYRAMLENG_START 0x104EC0
231#define DSP2XYRAMLENG_END 0x104EDC
232#define SEMAPHOREREGDSP2 0x104EE0
233#define DSP2INTCONTMASKREG 0x104EE4
234#define DSP2INTCONTPENDREG 0x104EE8
235#define DSP2INTCONTSERVINT 0x104EEC
236#define GPIODSP2 0x104EFC
237#define DMADSPBASEADDRREG_STARTDSP2 0x104F00
238#define DMADSPBASEADDRREG_ENDDSP2 0x104F1C
239#define DMAHOSTBASEADDRREG_STARTDSP2 0x104F20
240#define DMAHOSTBASEADDRREG_ENDDSP2 0x104F3C
241#define DMADSPCURADDRREG_STARTDSP2 0x104F40
242#define DMADSPCURADDRREG_ENDDSP2 0x104F5C
243#define DMAHOSTCURADDRREG_STARTDSP2 0x104F60
244#define DMAHOSTCURADDRREG_ENDDSP2 0x104F7C
245#define DMATANXCOUNTREG_STARTDSP2 0x104F80
246#define DMATANXCOUNTREG_ENDDSP2 0x104F9C
247#define DMATIMEBUGREG_STARTDSP2 0x104FA0
248#define DMATIMEBUGREG_ENDDSP2 0x104FAC
249#define DMACNTLMODFREG_STARTDSP2 0x104FA0
250#define DMACNTLMODFREG_ENDDSP2 0x104FAC
251
252#define DMAGLOBSTATSREGDSP2 0x104FEC
253#define DSP2XGPRAM_START 0x105000
254#define DSP2XGPRAM_END 0x1051FC
255#define DSP2YGPRAM_START 0x105800
256#define DSP2YGPRAM_END 0x1059FC
257
258
259
260#define AUDIORINGIPDSP3_START 0x106000
261#define AUDIORINGIPDSP3_END 0x1063FC
262#define AUDIORINGOPDSP3_START 0x106400
263#define AUDIORINGOPDSP3_END 0x1067FC
264#define AUDPARARINGIODSP3_START 0x106800
265#define AUDPARARINGIODSP3_END 0x106BFC
266#define DSP3LOCALHWREG_START 0x106C00
267#define DSP3LOCALHWREG_END 0x106C3C
268#define DSP3XYRAMAGINDEX_START 0x106C40
269#define DSP3XYRAMAGINDEX_END 0x106C5C
270#define DSP3XYRAMAGMDFR_START 0x106C60
271#define DSP3XYRAMAGMDFR_END 0x106C7C
272#define DSP3INTCONTLVEC_START 0x106C80
273#define DSP3INTCONTLVEC_END 0x106CD8
274#define HOSTINTFPORTADDRCONTDSP3 0x106D40
275#define HOSTINTFPORTDATADSP3 0x106D44
276#define TIME0PERENBDSP3 0x106D60
277#define TIME0COUNTERDSP3 0x106D64
278#define TIME1PERENBDSP3 0x106D68
279#define TIME1COUNTERDSP3 0x106D6C
280#define TIME2PERENBDSP3 0x106D70
281#define TIME2COUNTERDSP3 0x106D74
282#define TIME3PERENBDSP3 0x106D78
283#define TIME3COUNTERDSP3 0x106D7C
284#define XRAMINDOPERREFNOUP_STARTDSP3 0x106D80
285#define XRAMINDOPERREFNOUP_ENDDSP3 0x106D9C
286#define XRAMINDOPERREFUP_STARTDSP3 0x106DA0
287#define XRAMINDOPERREFUP_ENDDSP3 0x106DBC
288#define YRAMINDOPERREFNOUP_STARTDSP3 0x106DC0
289#define YRAMINDOPERREFNOUP_ENDDSP3 0x106DDC
290#define YRAMINDOPERREFUP_STARTDSP3 0x106DE0
291#define YRAMINDOPERREFUP_ENDDSP3 0x100DFC
292
293#define DSP3CONDCODE 0x106E00
294#define DSP3STACKFLAG 0x106E04
295#define DSP3PROGCOUNTSTACKPTREG 0x106E08
296#define DSP3PROGCOUNTSTACKDATAREG 0x106E0C
297#define DSP3CURLOOPADDRREG 0x106E10
298#define DSP3CURLOOPCOUNT 0x106E14
299#define DSP3TOPLOOPCOUNTSTACK 0x106E18
300#define DSP3TOPLOOPADDRSTACK 0x106E1C
301#define DSP3LOOPSTACKPTR 0x106E20
302#define DSP3STASSTACKDATAREG 0x106E24
303#define DSP3STASSTACKPTR 0x106E28
304#define DSP3PROGCOUNT 0x106E2C
305#define DSP3XYRAMBASE_START 0x106EA0
306#define DSP3XYRAMBASE_END 0x106EBC
307#define DSP3XYRAMLENG_START 0x106EC0
308#define DSP3XYRAMLENG_END 0x106EDC
309#define SEMAPHOREREGDSP3 0x106EE0
310#define DSP3INTCONTMASKREG 0x106EE4
311#define DSP3INTCONTPENDREG 0x106EE8
312#define DSP3INTCONTSERVINT 0x106EEC
313#define GPIODSP3 0x106EFC
314#define DMADSPBASEADDRREG_STARTDSP3 0x106F00
315#define DMADSPBASEADDRREG_ENDDSP3 0x106F1C
316#define DMAHOSTBASEADDRREG_STARTDSP3 0x106F20
317#define DMAHOSTBASEADDRREG_ENDDSP3 0x106F3C
318#define DMADSPCURADDRREG_STARTDSP3 0x106F40
319#define DMADSPCURADDRREG_ENDDSP3 0x106F5C
320#define DMAHOSTCURADDRREG_STARTDSP3 0x106F60
321#define DMAHOSTCURADDRREG_ENDDSP3 0x106F7C
322#define DMATANXCOUNTREG_STARTDSP3 0x106F80
323#define DMATANXCOUNTREG_ENDDSP3 0x106F9C
324#define DMATIMEBUGREG_STARTDSP3 0x106FA0
325#define DMATIMEBUGREG_ENDDSP3 0x106FAC
326#define DMACNTLMODFREG_STARTDSP3 0x106FA0
327#define DMACNTLMODFREG_ENDDSP3 0x106FAC
328
329#define DMAGLOBSTATSREGDSP3 0x106FEC
330#define DSP3XGPRAM_START 0x107000
331#define DSP3XGPRAM_END 0x1071FC
332#define DSP3YGPRAM_START 0x107800
333#define DSP3YGPRAM_END 0x1079FC
334
335/* end of DSP reg definitions */
336
337#define DSPAIMAP_START 0x108000
338#define DSPAIMAP_END 0x1083FC
339#define DSPPIMAP_START 0x108400
340#define DSPPIMAP_END 0x1087FC
341#define DSPPOMAP_START 0x108800
342#define DSPPOMAP_END 0x108BFC
343#define DSPPOCTL 0x108C00
344#define TKCTL_START 0x110000
345#define TKCTL_END 0x110FFC
346#define TKCC_START 0x111000
347#define TKCC_END 0x111FFC
348#define TKIMAP_START 0x112000
349#define TKIMAP_END 0x112FFC
350#define TKDCTR16 0x113000
351#define TKPB16 0x113004
352#define TKBS16 0x113008
353#define TKDCTR32 0x11300C
354#define TKPB32 0x113010
355#define TKBS32 0x113014
356#define ICDCTR16 0x113018
357#define ITBS16 0x11301C
358#define ICDCTR32 0x113020
359#define ITBS32 0x113024
360#define ITSTART 0x113028
361#define TKSQ 0x11302C
362
363#define TKSCCTL_START 0x114000
364#define TKSCCTL_END 0x11403C
365#define TKSCADR_START 0x114100
366#define TKSCADR_END 0x11413C
367#define TKSCDATAX_START 0x114800
368#define TKSCDATAX_END 0x1149FC
369#define TKPCDATAX_START 0x120000
370#define TKPCDATAX_END 0x12FFFC
371
372#define MALSA 0x130000
373#define MAPPHA 0x130004
374#define MAPPLA 0x130008
375#define MALSB 0x130010
376#define MAPPHB 0x130014
377#define MAPPLB 0x130018
378
379#define TANSPORTMAPABREGS_START 0x130020
380#define TANSPORTMAPABREGS_END 0x13A2FC
381
382#define PTPAHX 0x13B000
383#define PTPALX 0x13B004
384
385#define TANSPPAGETABLEPHYADDR015_START 0x13B008
386#define TANSPPAGETABLEPHYADDR015_END 0x13B07C
387#define TRNQADRX_START 0x13B100
388#define TRNQADRX_END 0x13B13C
389#define TRNQTIMX_START 0x13B200
390#define TRNQTIMX_END 0x13B23C
391#define TRNQAPARMX_START 0x13B300
392#define TRNQAPARMX_END 0x13B33C
393
394#define TRNQCNT 0x13B400
395#define TRNCTL 0x13B404
396#define TRNIS 0x13B408
397#define TRNCURTS 0x13B40C
398
399#define AMOP_START 0x140000
400#define AMOPLO 0x140000
401#define AMOPHI 0x140004
402#define AMOP_END 0x147FFC
403#define PMOP_START 0x148000
404#define PMOPLO 0x148000
405#define PMOPHI 0x148004
406#define PMOP_END 0x14FFFC
407#define PCURR_START 0x150000
408#define PCURR_END 0x153FFC
409#define PTRAG_START 0x154000
410#define PTRAG_END 0x157FFC
411#define PSR_START 0x158000
412#define PSR_END 0x15BFFC
413
414#define PFSTAT4SEG_START 0x160000
415#define PFSTAT4SEG_END 0x160BFC
416#define PFSTAT2SEG_START 0x160C00
417#define PFSTAT2SEG_END 0x1617FC
418#define PFTARG4SEG_START 0x164000
419#define PFTARG4SEG_END 0x164BFC
420#define PFTARG2SEG_START 0x164C00
421#define PFTARG2SEG_END 0x1657FC
422#define PFSR4SEG_START 0x168000
423#define PFSR4SEG_END 0x168BFC
424#define PFSR2SEG_START 0x168C00
425#define PFSR2SEG_END 0x1697FC
426#define PCURRMS4SEG_START 0x16C000
427#define PCURRMS4SEG_END 0x16CCFC
428#define PCURRMS2SEG_START 0x16CC00
429#define PCURRMS2SEG_END 0x16D7FC
430#define PTARGMS4SEG_START 0x170000
431#define PTARGMS4SEG_END 0x172FFC
432#define PTARGMS2SEG_START 0x173000
433#define PTARGMS2SEG_END 0x1747FC
434#define PSRMS4SEG_START 0x170000
435#define PSRMS4SEG_END 0x172FFC
436#define PSRMS2SEG_START 0x173000
437#define PSRMS2SEG_END 0x1747FC
438
439#define PRING_LO_START 0x190000
440#define PRING_LO_END 0x193FFC
441#define PRING_HI_START 0x194000
442#define PRING_HI_END 0x197FFC
443#define PRING_LO_HI_START 0x198000
444#define PRING_LO_HI 0x198000
445#define PRING_LO_HI_END 0x19BFFC
446
447#define PINTFIFO 0x1A0000
448#define SRCCTL 0x1B0000
449#define SRCCCR 0x1B0004
450#define SRCIMAP 0x1B0008
451#define SRCODDC 0x1B000C
452#define SRCCA 0x1B0010
453#define SRCCF 0x1B0014
454#define SRCSA 0x1B0018
455#define SRCLA 0x1B001C
456#define SRCCTLSWR 0x1B0020
457
458/* SRC HERE */
459#define SRCALBA 0x1B002C
460#define SRCMCTL 0x1B012C
461#define SRCCERR 0x1B022C
462#define SRCITB 0x1B032C
463#define SRCIPM 0x1B082C
464#define SRCIP 0x1B102C
465#define SRCENBSTAT 0x1B202C
466#define SRCENBLO 0x1B212C
467#define SRCENBHI 0x1B222C
468#define SRCENBS 0x1B232C
469#define SRCENB 0x1B282C
470#define SRCENB07 0x1B282C
471#define SRCENBS07 0x1B302C
472
473#define SRCDN0Z 0x1B0030
474#define SRCDN0Z0 0x1B0030
475#define SRCDN0Z1 0x1B0034
476#define SRCDN0Z2 0x1B0038
477#define SRCDN0Z3 0x1B003C
478#define SRCDN1Z 0x1B0040
479#define SRCDN1Z0 0x1B0040
480#define SRCDN1Z1 0x1B0044
481#define SRCDN1Z2 0x1B0048
482#define SRCDN1Z3 0x1B004C
483#define SRCDN1Z4 0x1B0050
484#define SRCDN1Z5 0x1B0054
485#define SRCDN1Z6 0x1B0058
486#define SRCDN1Z7 0x1B005C
487#define SRCUPZ 0x1B0060
488#define SRCUPZ0 0x1B0060
489#define SRCUPZ1 0x1B0064
490#define SRCUPZ2 0x1B0068
491#define SRCUPZ3 0x1B006C
492#define SRCUPZ4 0x1B0070
493#define SRCUPZ5 0x1B0074
494#define SRCUPZ6 0x1B0078
495#define SRCUPZ7 0x1B007C
496#define SRCCD0 0x1B0080
497#define SRCCD1 0x1B0084
498#define SRCCD2 0x1B0088
499#define SRCCD3 0x1B008C
500#define SRCCD4 0x1B0090
501#define SRCCD5 0x1B0094
502#define SRCCD6 0x1B0098
503#define SRCCD7 0x1B009C
504#define SRCCD8 0x1B00A0
505#define SRCCD9 0x1B00A4
506#define SRCCDA 0x1B00A8
507#define SRCCDB 0x1B00AC
508#define SRCCDC 0x1B00B0
509#define SRCCDD 0x1B00B4
510#define SRCCDE 0x1B00B8
511#define SRCCDF 0x1B00BC
512#define SRCCD10 0x1B00C0
513#define SRCCD11 0x1B00C4
514#define SRCCD12 0x1B00C8
515#define SRCCD13 0x1B00CC
516#define SRCCD14 0x1B00D0
517#define SRCCD15 0x1B00D4
518#define SRCCD16 0x1B00D8
519#define SRCCD17 0x1B00DC
520#define SRCCD18 0x1B00E0
521#define SRCCD19 0x1B00E4
522#define SRCCD1A 0x1B00E8
523#define SRCCD1B 0x1B00EC
524#define SRCCD1C 0x1B00F0
525#define SRCCD1D 0x1B00F4
526#define SRCCD1E 0x1B00F8
527#define SRCCD1F 0x1B00FC
528
529#define SRCCONTRBLOCK_START 0x1B0100
530#define SRCCONTRBLOCK_END 0x1BFFFC
531#define FILTOP_START 0x1C0000
532#define FILTOP_END 0x1C05FC
533#define FILTIMAP_START 0x1C0800
534#define FILTIMAP_END 0x1C0DFC
535#define FILTZ1_START 0x1C1000
536#define FILTZ1_END 0x1C15FC
537#define FILTZ2_START 0x1C1800
538#define FILTZ2_END 0x1C1DFC
539#define DAOIMAP_START 0x1C5000
540#define DAOIMAP 0x1C5000
541#define DAOIMAP_END 0x1C5124
542
543#define AC97D 0x1C5400
544#define AC97A 0x1C5404
545#define AC97CTL 0x1C5408
546#define I2SCTL 0x1C5420
547
548#define SPOS 0x1C5440
549#define SPOSA 0x1C5440
550#define SPOSB 0x1C5444
551#define SPOSC 0x1C5448
552#define SPOSD 0x1C544C
553
554#define SPISA 0x1C5450
555#define SPISB 0x1C5454
556#define SPISC 0x1C5458
557#define SPISD 0x1C545C
558
559#define SPFSCTL 0x1C5460
560
561#define SPFS0 0x1C5468
562#define SPFS1 0x1C546C
563#define SPFS2 0x1C5470
564#define SPFS3 0x1C5474
565#define SPFS4 0x1C5478
566#define SPFS5 0x1C547C
567
568#define SPOCTL 0x1C5480
569#define SPICTL 0x1C5484
570#define SPISTS 0x1C5488
571#define SPINTP 0x1C548C
572#define SPINTE 0x1C5490
573#define SPUTCTLAB 0x1C5494
574#define SPUTCTLCD 0x1C5498
575
576#define SRTSPA 0x1C54C0
577#define SRTSPB 0x1C54C4
578#define SRTSPC 0x1C54C8
579#define SRTSPD 0x1C54CC
580
581#define SRTSCTL 0x1C54D0
582#define SRTSCTLA 0x1C54D0
583#define SRTSCTLB 0x1C54D4
584#define SRTSCTLC 0x1C54D8
585#define SRTSCTLD 0x1C54DC
586
587#define SRTI2S 0x1C54E0
588#define SRTICTL 0x1C54F0
589
590#define WC 0x1C6000
591#define TIMR 0x1C6004
592# define TIMR_IE (1<<15)
593# define TIMR_IP (1<<14)
594
595#define GIP 0x1C6010
596#define GIE 0x1C6014
597#define DIE 0x1C6018
598#define DIC 0x1C601C
599#define GPIO 0x1C6020
600#define GPIOCTL 0x1C6024
601#define GPIP 0x1C6028
602#define GPIE 0x1C602C
603#define DSPINT0 0x1C6030
604#define DSPEIOC 0x1C6034
605#define MUADAT 0x1C6040
606#define MUACMD 0x1C6044
607#define MUASTAT 0x1C6044
608#define MUBDAT 0x1C6048
609#define MUBCMD 0x1C604C
610#define MUBSTAT 0x1C604C
611#define UARTCMA 0x1C6050
612#define UARTCMB 0x1C6054
613#define UARTIP 0x1C6058
614#define UARTIE 0x1C605C
615#define PLLCTL 0x1C6060
616#define PLLDCD 0x1C6064
617#define GCTL 0x1C6070
618#define ID0 0x1C6080
619#define ID1 0x1C6084
620#define ID2 0x1C6088
621#define ID3 0x1C608C
622#define SDRCTL 0x1C7000
623
624
625#define I2SA_L 0x0L
626#define I2SA_R 0x1L
627#define I2SB_L 0x8L
628#define I2SB_R 0x9L
629#define I2SC_L 0x10L
630#define I2SC_R 0x11L
631#define I2SD_L 0x18L
632#define I2SD_R 0x19L
633
634#endif /* CT20K1REG_H */
635
636
diff --git a/sound/pci/ctxfi/ct20k2reg.h b/sound/pci/ctxfi/ct20k2reg.h
new file mode 100644
index 000000000000..2d07986f57cc
--- /dev/null
+++ b/sound/pci/ctxfi/ct20k2reg.h
@@ -0,0 +1,85 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 */
8
9#ifndef _20K2REGISTERS_H_
10#define _20K2REGISTERS_H_
11
12
13/* Timer Registers */
14#define TIMER_TIMR 0x1B7004
15#define INTERRUPT_GIP 0x1B7010
16#define INTERRUPT_GIE 0x1B7014
17
18/* I2C Registers */
19#define I2C_IF_ADDRESS 0x1B9000
20#define I2C_IF_WDATA 0x1B9004
21#define I2C_IF_RDATA 0x1B9008
22#define I2C_IF_STATUS 0x1B900C
23#define I2C_IF_WLOCK 0x1B9010
24
25/* Global Control Registers */
26#define GLOBAL_CNTL_GCTL 0x1B7090
27
28/* PLL Registers */
29#define PLL_CTL 0x1B7080
30#define PLL_STAT 0x1B7084
31#define PLL_ENB 0x1B7088
32
33/* SRC Registers */
34#define SRC_CTL 0x1A0000 /* 0x1A0000 + (256 * Chn) */
35#define SRC_CCR 0x1A0004 /* 0x1A0004 + (256 * Chn) */
36#define SRC_IMAP 0x1A0008 /* 0x1A0008 + (256 * Chn) */
37#define SRC_CA 0x1A0010 /* 0x1A0010 + (256 * Chn) */
38#define SRC_CF 0x1A0014 /* 0x1A0014 + (256 * Chn) */
39#define SRC_SA 0x1A0018 /* 0x1A0018 + (256 * Chn) */
40#define SRC_LA 0x1A001C /* 0x1A001C + (256 * Chn) */
41#define SRC_CTLSWR 0x1A0020 /* 0x1A0020 + (256 * Chn) */
42#define SRC_CD 0x1A0080 /* 0x1A0080 + (256 * Chn) + (4 * Regn) */
43#define SRC_MCTL 0x1A012C
44#define SRC_IP 0x1A102C /* 0x1A102C + (256 * Regn) */
45#define SRC_ENB 0x1A282C /* 0x1A282C + (256 * Regn) */
46#define SRC_ENBSTAT 0x1A202C
47#define SRC_ENBSA 0x1A232C
48#define SRC_DN0Z 0x1A0030
49#define SRC_DN1Z 0x1A0040
50#define SRC_UPZ 0x1A0060
51
52/* GPIO Registers */
53#define GPIO_DATA 0x1B7020
54#define GPIO_CTRL 0x1B7024
55
56/* Virtual memory registers */
57#define VMEM_PTPAL 0x1C6300 /* 0x1C6300 + (16 * Chn) */
58#define VMEM_PTPAH 0x1C6304 /* 0x1C6304 + (16 * Chn) */
59#define VMEM_CTL 0x1C7000
60
61/* Transport Registers */
62#define TRANSPORT_ENB 0x1B6000
63#define TRANSPORT_CTL 0x1B6004
64#define TRANSPORT_INT 0x1B6008
65
66/* Audio IO */
67#define AUDIO_IO_AIM 0x1B5000 /* 0x1B5000 + (0x04 * Chn) */
68#define AUDIO_IO_TX_CTL 0x1B5400 /* 0x1B5400 + (0x40 * Chn) */
69#define AUDIO_IO_TX_CSTAT_L 0x1B5408 /* 0x1B5408 + (0x40 * Chn) */
70#define AUDIO_IO_TX_CSTAT_H 0x1B540C /* 0x1B540C + (0x40 * Chn) */
71#define AUDIO_IO_RX_CTL 0x1B5410 /* 0x1B5410 + (0x40 * Chn) */
72#define AUDIO_IO_RX_SRT_CTL 0x1B5420 /* 0x1B5420 + (0x40 * Chn) */
73#define AUDIO_IO_MCLK 0x1B5600
74#define AUDIO_IO_TX_BLRCLK 0x1B5604
75#define AUDIO_IO_RX_BLRCLK 0x1B5608
76
77/* Mixer */
78#define MIXER_AMOPLO 0x130000 /* 0x130000 + (8 * Chn) [4095 : 0] */
79#define MIXER_AMOPHI 0x130004 /* 0x130004 + (8 * Chn) [4095 : 0] */
80#define MIXER_PRING_LO_HI 0x188000 /* 0x188000 + (4 * Chn) [4095 : 0] */
81#define MIXER_PMOPLO 0x138000 /* 0x138000 + (8 * Chn) [4095 : 0] */
82#define MIXER_PMOPHI 0x138004 /* 0x138004 + (8 * Chn) [4095 : 0] */
83#define MIXER_AR_ENABLE 0x19000C
84
85#endif
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
new file mode 100644
index 000000000000..a1db51b3ead8
--- /dev/null
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -0,0 +1,488 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctamixer.c
9 *
10 * @Brief
11 * This file contains the implementation of the Audio Mixer
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 21 2008
16 *
17 */
18
19#include "ctamixer.h"
20#include "cthardware.h"
21#include <linux/slab.h>
22
23#define AMIXER_RESOURCE_NUM 256
24#define SUM_RESOURCE_NUM 256
25
26#define AMIXER_Y_IMMEDIATE 1
27
28#define BLANK_SLOT 4094
29
30static int amixer_master(struct rsc *rsc)
31{
32 rsc->conj = 0;
33 return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
34}
35
36static int amixer_next_conj(struct rsc *rsc)
37{
38 rsc->conj++;
39 return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
40}
41
42static int amixer_index(const struct rsc *rsc)
43{
44 return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
45}
46
/* Output slot carrying this amixer's result: 16 slots per index, sub-slot 4. */
static int amixer_output_slot(const struct rsc *rsc)
{
	return amixer_index(rsc) * 16 + 0x4;
}
51
52static struct rsc_ops amixer_basic_rsc_ops = {
53 .master = amixer_master,
54 .next_conj = amixer_next_conj,
55 .index = amixer_index,
56 .output_slot = amixer_output_slot,
57};
58
59static int amixer_set_input(struct amixer *amixer, struct rsc *rsc)
60{
61 struct hw *hw;
62
63 hw = amixer->rsc.hw;
64 hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE);
65 amixer->input = rsc;
66 if (NULL == rsc)
67 hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT);
68 else
69 hw->amixer_set_x(amixer->rsc.ctrl_blk,
70 rsc->ops->output_slot(rsc));
71
72 return 0;
73}
74
75/* y is a 14-bit immediate constant */
76static int amixer_set_y(struct amixer *amixer, unsigned int y)
77{
78 struct hw *hw;
79
80 hw = amixer->rsc.hw;
81 hw->amixer_set_y(amixer->rsc.ctrl_blk, y);
82
83 return 0;
84}
85
86static int amixer_set_invalid_squash(struct amixer *amixer, unsigned int iv)
87{
88 struct hw *hw;
89
90 hw = amixer->rsc.hw;
91 hw->amixer_set_iv(amixer->rsc.ctrl_blk, iv);
92
93 return 0;
94}
95
96static int amixer_set_sum(struct amixer *amixer, struct sum *sum)
97{
98 struct hw *hw;
99
100 hw = amixer->rsc.hw;
101 amixer->sum = sum;
102 if (NULL == sum) {
103 hw->amixer_set_se(amixer->rsc.ctrl_blk, 0);
104 } else {
105 hw->amixer_set_se(amixer->rsc.ctrl_blk, 1);
106 hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
107 sum->rsc.ops->index(&sum->rsc));
108 }
109
110 return 0;
111}
112
113static int amixer_commit_write(struct amixer *amixer)
114{
115 struct hw *hw;
116 unsigned int index;
117 int i;
118 struct rsc *input;
119 struct sum *sum;
120
121 hw = amixer->rsc.hw;
122 input = amixer->input;
123 sum = amixer->sum;
124
125 /* Program master and conjugate resources */
126 amixer->rsc.ops->master(&amixer->rsc);
127 if (NULL != input)
128 input->ops->master(input);
129
130 if (NULL != sum)
131 sum->rsc.ops->master(&sum->rsc);
132
133 for (i = 0; i < amixer->rsc.msr; i++) {
134 hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk);
135 if (NULL != input) {
136 hw->amixer_set_x(amixer->rsc.ctrl_blk,
137 input->ops->output_slot(input));
138 input->ops->next_conj(input);
139 }
140 if (NULL != sum) {
141 hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
142 sum->rsc.ops->index(&sum->rsc));
143 sum->rsc.ops->next_conj(&sum->rsc);
144 }
145 index = amixer->rsc.ops->output_slot(&amixer->rsc);
146 hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk);
147 amixer->rsc.ops->next_conj(&amixer->rsc);
148 }
149 amixer->rsc.ops->master(&amixer->rsc);
150 if (NULL != input)
151 input->ops->master(input);
152
153 if (NULL != sum)
154 sum->rsc.ops->master(&sum->rsc);
155
156 return 0;
157}
158
159static int amixer_commit_raw_write(struct amixer *amixer)
160{
161 struct hw *hw;
162 unsigned int index;
163
164 hw = amixer->rsc.hw;
165 index = amixer->rsc.ops->output_slot(&amixer->rsc);
166 hw->amixer_commit_write(hw, index, amixer->rsc.ctrl_blk);
167
168 return 0;
169}
170
171static int amixer_get_y(struct amixer *amixer)
172{
173 struct hw *hw;
174
175 hw = amixer->rsc.hw;
176 return hw->amixer_get_y(amixer->rsc.ctrl_blk);
177}
178
/* Convenience helper: program input, scale and sum target in one call,
 * then commit the whole configuration to hardware. */
static int amixer_setup(struct amixer *amixer, struct rsc *input,
			unsigned int scale, struct sum *sum)
{
	amixer_set_input(amixer, input);
	amixer_set_y(amixer, scale);
	amixer_set_sum(amixer, sum);
	amixer_commit_write(amixer);
	return 0;
}
188
189static struct amixer_rsc_ops amixer_ops = {
190 .set_input = amixer_set_input,
191 .set_invalid_squash = amixer_set_invalid_squash,
192 .set_scale = amixer_set_y,
193 .set_sum = amixer_set_sum,
194 .commit_write = amixer_commit_write,
195 .commit_raw_write = amixer_commit_raw_write,
196 .setup = amixer_setup,
197 .get_scale = amixer_get_y,
198};
199
200static int amixer_rsc_init(struct amixer *amixer,
201 const struct amixer_desc *desc,
202 struct amixer_mgr *mgr)
203{
204 int err;
205
206 err = rsc_init(&amixer->rsc, amixer->idx[0],
207 AMIXER, desc->msr, mgr->mgr.hw);
208 if (err)
209 return err;
210
211 /* Set amixer specific operations */
212 amixer->rsc.ops = &amixer_basic_rsc_ops;
213 amixer->ops = &amixer_ops;
214 amixer->input = NULL;
215 amixer->sum = NULL;
216
217 amixer_setup(amixer, NULL, 0, NULL);
218
219 return 0;
220}
221
222static int amixer_rsc_uninit(struct amixer *amixer)
223{
224 amixer_setup(amixer, NULL, 0, NULL);
225 rsc_uninit(&amixer->rsc);
226 amixer->ops = NULL;
227 amixer->input = NULL;
228 amixer->sum = NULL;
229 return 0;
230}
231
232static int get_amixer_rsc(struct amixer_mgr *mgr,
233 const struct amixer_desc *desc,
234 struct amixer **ramixer)
235{
236 int err, i;
237 unsigned int idx;
238 struct amixer *amixer;
239 unsigned long flags;
240
241 *ramixer = NULL;
242
243 /* Allocate mem for amixer resource */
244 amixer = kzalloc(sizeof(*amixer), GFP_KERNEL);
245 if (NULL == amixer) {
246 err = -ENOMEM;
247 return err;
248 }
249
250 /* Check whether there are sufficient
251 * amixer resources to meet request. */
252 spin_lock_irqsave(&mgr->mgr_lock, flags);
253 for (i = 0; i < desc->msr; i++) {
254 err = mgr_get_resource(&mgr->mgr, 1, &idx);
255 if (err)
256 break;
257
258 amixer->idx[i] = idx;
259 }
260 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
261 if (err) {
262 printk(KERN_ERR "ctxfi: Can't meet AMIXER resource request!\n");
263 goto error;
264 }
265
266 err = amixer_rsc_init(amixer, desc, mgr);
267 if (err)
268 goto error;
269
270 *ramixer = amixer;
271
272 return 0;
273
274error:
275 spin_lock_irqsave(&mgr->mgr_lock, flags);
276 for (i--; i >= 0; i--)
277 mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]);
278
279 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
280 kfree(amixer);
281 return err;
282}
283
284static int put_amixer_rsc(struct amixer_mgr *mgr, struct amixer *amixer)
285{
286 unsigned long flags;
287 int i;
288
289 spin_lock_irqsave(&mgr->mgr_lock, flags);
290 for (i = 0; i < amixer->rsc.msr; i++)
291 mgr_put_resource(&mgr->mgr, 1, amixer->idx[i]);
292
293 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
294 amixer_rsc_uninit(amixer);
295 kfree(amixer);
296
297 return 0;
298}
299
300int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
301{
302 int err;
303 struct amixer_mgr *amixer_mgr;
304
305 *ramixer_mgr = NULL;
306 amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL);
307 if (NULL == amixer_mgr)
308 return -ENOMEM;
309
310 err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw);
311 if (err)
312 goto error;
313
314 spin_lock_init(&amixer_mgr->mgr_lock);
315
316 amixer_mgr->get_amixer = get_amixer_rsc;
317 amixer_mgr->put_amixer = put_amixer_rsc;
318
319 *ramixer_mgr = amixer_mgr;
320
321 return 0;
322
323error:
324 kfree(amixer_mgr);
325 return err;
326}
327
/* Tear down the AMIXER resource manager created by amixer_mgr_create(). */
int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
{
	rsc_mgr_uninit(&amixer_mgr->mgr);
	kfree(amixer_mgr);
	return 0;
}
334
335/* SUM resource management */
336
337static int sum_master(struct rsc *rsc)
338{
339 rsc->conj = 0;
340 return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
341}
342
343static int sum_next_conj(struct rsc *rsc)
344{
345 rsc->conj++;
346 return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
347}
348
/* Return the hardware index of the currently selected conjugate block. */
static int sum_index(const struct rsc *rsc)
{
	return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
353
/* Compute the output slot of the current SUM block: 16 slots per
 * index, output at offset 0xc within the block. */
static int sum_output_slot(const struct rsc *rsc)
{
	int idx = sum_index(rsc);

	return (idx << 4) + 0xc;
}
358
/* Generic resource operations shared by all SUM resources. */
static struct rsc_ops sum_basic_rsc_ops = {
	.master = sum_master,
	.next_conj = sum_next_conj,
	.index = sum_index,
	.output_slot = sum_output_slot,
};
365
366static int sum_rsc_init(struct sum *sum,
367 const struct sum_desc *desc,
368 struct sum_mgr *mgr)
369{
370 int err;
371
372 err = rsc_init(&sum->rsc, sum->idx[0], SUM, desc->msr, mgr->mgr.hw);
373 if (err)
374 return err;
375
376 sum->rsc.ops = &sum_basic_rsc_ops;
377
378 return 0;
379}
380
/* Tear down the base resource embedded in a SUM object. */
static int sum_rsc_uninit(struct sum *sum)
{
	rsc_uninit(&sum->rsc);
	return 0;
}
386
387static int get_sum_rsc(struct sum_mgr *mgr,
388 const struct sum_desc *desc,
389 struct sum **rsum)
390{
391 int err, i;
392 unsigned int idx;
393 struct sum *sum;
394 unsigned long flags;
395
396 *rsum = NULL;
397
398 /* Allocate mem for sum resource */
399 sum = kzalloc(sizeof(*sum), GFP_KERNEL);
400 if (NULL == sum) {
401 err = -ENOMEM;
402 return err;
403 }
404
405 /* Check whether there are sufficient sum resources to meet request. */
406 spin_lock_irqsave(&mgr->mgr_lock, flags);
407 for (i = 0; i < desc->msr; i++) {
408 err = mgr_get_resource(&mgr->mgr, 1, &idx);
409 if (err)
410 break;
411
412 sum->idx[i] = idx;
413 }
414 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
415 if (err) {
416 printk(KERN_ERR "ctxfi: Can't meet SUM resource request!\n");
417 goto error;
418 }
419
420 err = sum_rsc_init(sum, desc, mgr);
421 if (err)
422 goto error;
423
424 *rsum = sum;
425
426 return 0;
427
428error:
429 spin_lock_irqsave(&mgr->mgr_lock, flags);
430 for (i--; i >= 0; i--)
431 mgr_put_resource(&mgr->mgr, 1, sum->idx[i]);
432
433 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
434 kfree(sum);
435 return err;
436}
437
438static int put_sum_rsc(struct sum_mgr *mgr, struct sum *sum)
439{
440 unsigned long flags;
441 int i;
442
443 spin_lock_irqsave(&mgr->mgr_lock, flags);
444 for (i = 0; i < sum->rsc.msr; i++)
445 mgr_put_resource(&mgr->mgr, 1, sum->idx[i]);
446
447 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
448 sum_rsc_uninit(sum);
449 kfree(sum);
450
451 return 0;
452}
453
454int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
455{
456 int err;
457 struct sum_mgr *sum_mgr;
458
459 *rsum_mgr = NULL;
460 sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL);
461 if (NULL == sum_mgr)
462 return -ENOMEM;
463
464 err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw);
465 if (err)
466 goto error;
467
468 spin_lock_init(&sum_mgr->mgr_lock);
469
470 sum_mgr->get_sum = get_sum_rsc;
471 sum_mgr->put_sum = put_sum_rsc;
472
473 *rsum_mgr = sum_mgr;
474
475 return 0;
476
477error:
478 kfree(sum_mgr);
479 return err;
480}
481
/* Tear down the SUM resource manager created by sum_mgr_create(). */
int sum_mgr_destroy(struct sum_mgr *sum_mgr)
{
	rsc_mgr_uninit(&sum_mgr->mgr);
	kfree(sum_mgr);
	return 0;
}
488
diff --git a/sound/pci/ctxfi/ctamixer.h b/sound/pci/ctxfi/ctamixer.h
new file mode 100644
index 000000000000..cc49e5ab4750
--- /dev/null
+++ b/sound/pci/ctxfi/ctamixer.h
@@ -0,0 +1,96 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctamixer.h
9 *
10 * @Brief
11 * This file contains the definition of the Audio Mixer
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 21 2008
16 *
17 */
18
19#ifndef CTAMIXER_H
20#define CTAMIXER_H
21
22#include "ctresource.h"
23#include <linux/spinlock.h>
24
/* Define the descriptor of a summation node resource */
struct sum {
	struct rsc rsc;		/* Basic resource info */
	unsigned char idx[8];	/* one hardware index per requested MSR unit */
};
30
/* Define sum resource request description info */
struct sum_desc {
	unsigned int msr;	/* number of hardware indices to allocate */
};
35
/* SUM resource manager: wraps the generic rsc_mgr with get/put hooks. */
struct sum_mgr {
	struct rsc_mgr mgr;	/* Basic resource manager info */
	spinlock_t mgr_lock;	/* serializes index get/put on mgr */

	/* request one sum resource */
	int (*get_sum)(struct sum_mgr *mgr,
			const struct sum_desc *desc, struct sum **rsum);
	/* return one sum resource */
	int (*put_sum)(struct sum_mgr *mgr, struct sum *sum);
};
46
/* Constructor and destructor of sum resource manager */
48int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr);
49int sum_mgr_destroy(struct sum_mgr *sum_mgr);
50
/* Define the descriptor of an amixer resource */
52struct amixer_rsc_ops;
53
/* An allocated audio mixer resource. */
struct amixer {
	struct rsc rsc;		/* Basic resource info */
	unsigned char idx[8];	/* one hardware index per requested MSR unit */
	struct rsc *input;	/* pointer to a resource acting as source */
	struct sum *sum;	/* Put amixer output to this summation node */
	struct amixer_rsc_ops *ops;	/* AMixer specific operations */
};
61
/* Operations implemented by an allocated AMIXER resource. */
struct amixer_rsc_ops {
	int (*set_input)(struct amixer *amixer, struct rsc *rsc);
	int (*set_scale)(struct amixer *amixer, unsigned int scale);
	int (*set_invalid_squash)(struct amixer *amixer, unsigned int iv);
	int (*set_sum)(struct amixer *amixer, struct sum *sum);
	int (*commit_write)(struct amixer *amixer);
	/* Only for interleaved recording */
	int (*commit_raw_write)(struct amixer *amixer);
	/* Convenience: set input, scale and sum in one call. */
	int (*setup)(struct amixer *amixer, struct rsc *input,
			unsigned int scale, struct sum *sum);
	int (*get_scale)(struct amixer *amixer);
};
74
/* Define amixer resource request description info */
struct amixer_desc {
	unsigned int msr;	/* number of hardware indices to allocate */
};
79
/* AMIXER resource manager: wraps the generic rsc_mgr with get/put hooks. */
struct amixer_mgr {
	struct rsc_mgr mgr;	/* Basic resource manager info */
	spinlock_t mgr_lock;	/* serializes index get/put on mgr */

	/* request one amixer resource */
	int (*get_amixer)(struct amixer_mgr *mgr,
			  const struct amixer_desc *desc,
			  struct amixer **ramixer);
	/* return one amixer resource */
	int (*put_amixer)(struct amixer_mgr *mgr, struct amixer *amixer);
};
91
92/* Constructor and destructor of amixer resource manager */
93int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr);
94int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr);
95
96#endif /* CTAMIXER_H */
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
new file mode 100644
index 000000000000..80fb2baed7a7
--- /dev/null
+++ b/sound/pci/ctxfi/ctatc.c
@@ -0,0 +1,1619 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctatc.c
9 *
10 * @Brief
11 * This file contains the implementation of the device resource management
12 * object.
13 *
14 * @Author Liu Chun
15 * @Date Mar 28 2008
16 */
17
18#include "ctatc.h"
19#include "ctpcm.h"
20#include "ctmixer.h"
21#include "cthardware.h"
22#include "ctsrc.h"
23#include "ctamixer.h"
24#include "ctdaio.h"
25#include "cttimer.h"
26#include <linux/delay.h>
27#include <sound/pcm.h>
28#include <sound/control.h>
29#include <sound/asoundef.h>
30
31#define MONO_SUM_SCALE 0x19a8 /* 2^(-0.5) in 14-bit floating format */
32#define DAIONUM 7
33#define MAX_MULTI_CHN 8
34
35#define IEC958_DEFAULT_CON ((IEC958_AES0_NONAUDIO \
36 | IEC958_AES0_CON_NOT_COPYRIGHT) \
37 | ((IEC958_AES1_CON_MIXER \
38 | IEC958_AES1_CON_ORIGINAL) << 8) \
39 | (0x10 << 16) \
40 | ((IEC958_AES3_CON_FS_48000) << 24))
41
/* PCI subsystem-ID quirk table: maps known 20k1-chip boards to their
 * CT* model enum values. */
static struct snd_pci_quirk __devinitdata subsys_20k1_list[] = {
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0031, "SB073x", CTSB073X),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_CREATIVE, 0xf000, 0x6000,
			   "UAA", CTUAA),
	SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_CREATIVE,
			     "Unknown", CT20K1_UNKNOWN),
	{ } /* terminator */
};
53
/* PCI subsystem-ID quirk table: maps known 20k2-chip boards to their
 * CT* model enum values. */
static struct snd_pci_quirk __devinitdata subsys_20k2_list[] = {
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB0760,
		      "SB0760", CTSB0760),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08801,
		      "SB0880", CTSB0880),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08802,
		      "SB0880", CTSB0880),
	SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB08803,
		      "SB0880", CTSB0880),
	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_CREATIVE, 0xf000,
			   PCI_SUBDEVICE_ID_CREATIVE_HENDRIX, "HENDRIX",
			   CTHENDRIX),
	{ } /* terminator */
};
68
/* Human-readable model names, indexed by the CT* model enum. */
static const char *ct_subsys_name[NUM_CTCARDS] = {
	[CTSB055X]	= "SB055x",
	[CTSB073X]	= "SB073x",
	[CTSB0760]	= "SB076x",
	[CTUAA]		= "UAA",
	[CT20K1_UNKNOWN] = "Unknown",
	[CTHENDRIX]	= "Hendrix",
	[CTSB0880]	= "SB0880",
};
78
/* Constructor/destructor and display name for each ALSA child device
 * (PCM streams and the mixer), indexed by enum CTALSADEVS. */
static struct {
	int (*create)(struct ct_atc *atc,
			enum CTALSADEVS device, const char *device_name);
	int (*destroy)(void *alsa_dev);
	const char *public_name;
} alsa_dev_funcs[NUM_CTALSADEVS] = {
	[FRONT]		= { .create = ct_alsa_pcm_create,
			    .destroy = NULL,
			    .public_name = "Front/WaveIn"},
	[SURROUND]	= { .create = ct_alsa_pcm_create,
			    .destroy = NULL,
			    .public_name = "Surround"},
	[CLFE]		= { .create = ct_alsa_pcm_create,
			    .destroy = NULL,
			    .public_name = "Center/LFE"},
	[SIDE]		= { .create = ct_alsa_pcm_create,
			    .destroy = NULL,
			    .public_name = "Side"},
	[IEC958]	= { .create = ct_alsa_pcm_create,
			    .destroy = NULL,
			    .public_name = "IEC958 Non-audio"},

	[MIXER]		= { .create = ct_alsa_mix_create,
			    .destroy = NULL,
			    .public_name = "Mixer"}
};
105
/* Generic manager constructor/destructor signatures used to erase the
 * per-type pointer types in the table below. */
typedef int (*create_t)(void *, void **);
typedef int (*destroy_t)(void *);

/* Constructor/destructor pair for each resource manager type,
 * indexed by enum RSCTYP. */
static struct {
	int (*create)(void *hw, void **rmgr);
	int (*destroy)(void *mgr);
} rsc_mgr_funcs[NUM_RSCTYP] = {
	[SRC]		= { .create	= (create_t)src_mgr_create,
			    .destroy	= (destroy_t)src_mgr_destroy	},
	[SRCIMP]	= { .create	= (create_t)srcimp_mgr_create,
			    .destroy	= (destroy_t)srcimp_mgr_destroy	},
	[AMIXER]	= { .create	= (create_t)amixer_mgr_create,
			    .destroy	= (destroy_t)amixer_mgr_destroy	},
	[SUM]		= { .create	= (create_t)sum_mgr_create,
			    .destroy	= (destroy_t)sum_mgr_destroy	},
	[DAIO]		= { .create	= (create_t)daio_mgr_create,
			    .destroy	= (destroy_t)daio_mgr_destroy	}
};
124
125static int
126atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm);
127
128/* *
129 * Only mono and interleaved modes are supported now.
130 * Always allocates a contiguous channel block.
131 * */
132
133static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
134{
135 struct snd_pcm_runtime *runtime;
136 struct ct_vm *vm;
137
138 if (NULL == apcm->substream)
139 return 0;
140
141 runtime = apcm->substream->runtime;
142 vm = atc->vm;
143
144 apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes);
145
146 if (NULL == apcm->vm_block)
147 return -ENOENT;
148
149 return 0;
150}
151
152static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
153{
154 struct ct_vm *vm;
155
156 if (NULL == apcm->vm_block)
157 return;
158
159 vm = atc->vm;
160
161 vm->unmap(vm, apcm->vm_block);
162
163 apcm->vm_block = NULL;
164}
165
166static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
167{
168 struct ct_vm *vm;
169 void *kvirt_addr;
170 unsigned long phys_addr;
171
172 vm = atc->vm;
173 kvirt_addr = vm->get_ptp_virt(vm, index);
174 if (kvirt_addr == NULL)
175 phys_addr = (~0UL);
176 else
177 phys_addr = virt_to_phys(kvirt_addr);
178
179 return phys_addr;
180}
181
182static unsigned int convert_format(snd_pcm_format_t snd_format)
183{
184 switch (snd_format) {
185 case SNDRV_PCM_FORMAT_U8:
186 return SRC_SF_U8;
187 case SNDRV_PCM_FORMAT_S16_LE:
188 return SRC_SF_S16;
189 case SNDRV_PCM_FORMAT_S24_3LE:
190 return SRC_SF_S24;
191 case SNDRV_PCM_FORMAT_S32_LE:
192 return SRC_SF_S32;
193 case SNDRV_PCM_FORMAT_FLOAT_LE:
194 return SRC_SF_F32;
195 default:
196 printk(KERN_ERR "ctxfi: not recognized snd format is %d \n",
197 snd_format);
198 return SRC_SF_S16;
199 }
200}
201
/*
 * Compute the rate-conversion pitch input_rate/output_rate as an
 * 8.24 fixed-point value (integer part in the top 8 bits).
 */
static unsigned int
atc_get_pitch(unsigned int input_rate, unsigned int output_rate)
{
	unsigned int pitch;
	int msb;

	/* Integer part goes into the upper 8 bits. */
	pitch = (input_rate / output_rate) << 24;
	input_rate %= output_rate;

	/* Scale both rates down so the 32-bit division below is exact
	 * for the usual audio rates. */
	input_rate /= 100;
	output_rate /= 100;

	/* Locate the most significant set bit of the remainder. */
	msb = 31;
	while (msb >= 0 && !(input_rate >> msb))
		msb--;

	if (msb >= 0) {
		/* Normalize, divide, then align the quotient so it lands
		 * on 24 fraction bits. */
		input_rate <<= (31 - msb);
		input_rate /= output_rate;
		msb = 24 - (31 - msb);
		if (msb >= 0)
			input_rate <<= msb;
		else
			input_rate >>= -msb;

		pitch |= input_rate;
	}

	return pitch;
}
230
/*
 * Pick the SRC interpolation ROM matching the given 8.24 pitch.
 * Returns the ROM number (0-3) or -ENOENT for pitches above 8.0.
 * The old `pitch >= 0x0` test was removed: always true for an
 * unsigned value (dead comparison flagged by -Wtype-limits).
 */
static int select_rom(unsigned int pitch)
{
	if ((pitch > 0x00428f5c) && (pitch < 0x01b851ec)) {
		/* 0.26 <= pitch <= 1.72 */
		return 1;
	} else if ((0x01d66666 == pitch) || (0x01d66667 == pitch)) {
		/* pitch == 1.8375 */
		return 2;
	} else if (0x02000000 == pitch) {
		/* pitch == 2 */
		return 3;
	} else if (pitch <= 0x08000000) {
		/* 0 <= pitch <= 8 */
		return 0;
	} else {
		return -ENOENT;
	}
}
249
/*
 * Prepare a PCM playback stream: allocate a memory-read SRC matched to
 * the stream's rate/format, the per-channel AMIXERs, map the audio
 * buffer into device virtual memory, and wire SRC -> AMIXER -> PCM
 * output sums.  Idempotent: returns 0 immediately when the stream
 * already owns an SRC.  Returns a negative error code on failure,
 * after releasing everything grabbed so far.
 */
static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src_mgr *src_mgr = atc->rsc_mgrs[SRC];
	struct amixer_mgr *amixer_mgr = atc->rsc_mgrs[AMIXER];
	struct src_desc desc = {0};
	struct amixer_desc mix_dsc = {0};
	struct src *src;
	struct amixer *amixer;
	int err;
	int n_amixer = apcm->substream->runtime->channels, i = 0;
	int device = apcm->substream->pcm->device;
	unsigned int pitch;
	unsigned long flags;

	if (NULL != apcm->src) {
		/* Prepared pcm playback */
		return 0;
	}

	/* first release old resources */
	atc->pcm_release_resources(atc, apcm);

	/* Get SRC resource */
	desc.multi = apcm->substream->runtime->channels;
	desc.msr = atc->msr;
	desc.mode = MEMRD;
	err = src_mgr->get_src(src_mgr, &desc, (struct src **)&apcm->src);
	if (err)
		goto error1;

	/* Pitch converts from the stream rate to the board's internal
	 * rate (rsr * msr), in 8.24 fixed point. */
	pitch = atc_get_pitch(apcm->substream->runtime->rate,
						(atc->rsr * atc->msr));
	src = apcm->src;
	src->ops->set_pitch(src, pitch);
	src->ops->set_rom(src, select_rom(pitch));
	src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
	src->ops->set_pm(src, (src->ops->next_interleave(src) != NULL));

	/* Get AMIXER resource: at least two (stereo), else one per channel */
	n_amixer = (n_amixer < 2) ? 2 : n_amixer;
	apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
	if (NULL == apcm->amixers) {
		err = -ENOMEM;
		goto error1;
	}
	mix_dsc.msr = atc->msr;
	for (i = 0, apcm->n_amixer = 0; i < n_amixer; i++) {
		err = amixer_mgr->get_amixer(amixer_mgr, &mix_dsc,
					(struct amixer **)&apcm->amixers[i]);
		if (err)
			goto error1;

		apcm->n_amixer++;
	}

	/* Set up device virtual mem map */
	err = ct_map_audio_buffer(atc, apcm);
	if (err < 0)
		goto error1;

	/* Connect resources: cycle the SRC through its interleave chain
	 * so each AMIXER is fed the matching channel. */
	src = apcm->src;
	for (i = 0; i < n_amixer; i++) {
		amixer = apcm->amixers[i];
		spin_lock_irqsave(&atc->atc_lock, flags);
		amixer->ops->setup(amixer, &src->rsc,
					INIT_VOL, atc->pcm[i+device*2]);
		spin_unlock_irqrestore(&atc->atc_lock, flags);
		src = src->ops->next_interleave(src);
		if (NULL == src)
			src = apcm->src;
	}

	ct_timer_prepare(apcm->timer);

	return 0;

error1:
	atc_pcm_release_resources(atc, apcm);
	return err;
}
331
/*
 * Release every hardware resource attached to a PCM stream (SRCIMPs,
 * converting SRCs, AMIXERs, the mono SUM, the main SRC) and undo the
 * device virtual memory mapping.  Safe to call on a partially built
 * stream: every pointer is checked and cleared.  Always returns 0.
 */
static int
atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src_mgr *src_mgr = atc->rsc_mgrs[SRC];
	struct srcimp_mgr *srcimp_mgr = atc->rsc_mgrs[SRCIMP];
	struct amixer_mgr *amixer_mgr = atc->rsc_mgrs[AMIXER];
	struct sum_mgr *sum_mgr = atc->rsc_mgrs[SUM];
	struct srcimp *srcimp;
	int i;

	if (NULL != apcm->srcimps) {
		/* SRCIMPs must be unmapped before being returned. */
		for (i = 0; i < apcm->n_srcimp; i++) {
			srcimp = apcm->srcimps[i];
			srcimp->ops->unmap(srcimp);
			srcimp_mgr->put_srcimp(srcimp_mgr, srcimp);
			apcm->srcimps[i] = NULL;
		}
		kfree(apcm->srcimps);
		apcm->srcimps = NULL;
	}

	if (NULL != apcm->srccs) {
		for (i = 0; i < apcm->n_srcc; i++) {
			src_mgr->put_src(src_mgr, apcm->srccs[i]);
			apcm->srccs[i] = NULL;
		}
		kfree(apcm->srccs);
		apcm->srccs = NULL;
	}

	if (NULL != apcm->amixers) {
		for (i = 0; i < apcm->n_amixer; i++) {
			amixer_mgr->put_amixer(amixer_mgr, apcm->amixers[i]);
			apcm->amixers[i] = NULL;
		}
		kfree(apcm->amixers);
		apcm->amixers = NULL;
	}

	if (NULL != apcm->mono) {
		sum_mgr->put_sum(sum_mgr, apcm->mono);
		apcm->mono = NULL;
	}

	if (NULL != apcm->src) {
		src_mgr->put_src(src_mgr, apcm->src);
		apcm->src = NULL;
	}

	if (NULL != apcm->vm_block) {
		/* Undo device virtual mem map */
		ct_unmap_audio_buffer(atc, apcm);
		apcm->vm_block = NULL;
	}

	return 0;
}
389
/*
 * Kick off a prepared playback stream: program start/loop/current
 * addresses on the SRC, enable buffer mode, and start the interrupt
 * timer.  Idempotent while the stream is already running.
 */
static int atc_pcm_playback_start(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	unsigned int max_cisz;
	struct src *src = apcm->src;

	if (apcm->started)
		return 0;
	apcm->started = 1;

	/* 0x80 bytes per channel-slot, capped at 8 slots.  Must stay in
	 * sync with the 128 * ... offset subtracted in
	 * atc_pcm_playback_position(). */
	max_cisz = src->multi * src->rsc.msr;
	max_cisz = 0x80 * (max_cisz < 8 ? max_cisz : 8);

	src->ops->set_sa(src, apcm->vm_block->addr);
	src->ops->set_la(src, apcm->vm_block->addr + apcm->vm_block->size);
	src->ops->set_ca(src, apcm->vm_block->addr + max_cisz);
	src->ops->set_cisz(src, max_cisz);

	src->ops->set_bm(src, 1);
	src->ops->set_state(src, SRC_STATE_INIT);
	src->ops->commit_write(src);

	ct_timer_start(apcm->timer);
	return 0;
}
414
/*
 * Stop a running stream (playback or capture): halt the interrupt
 * timer, turn off buffer mode and switch the main SRC plus any
 * converting SRCs to SRC_STATE_OFF, then clear the started flag.
 */
static int atc_pcm_stop(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src *src;
	int i;

	ct_timer_stop(apcm->timer);

	src = apcm->src;
	src->ops->set_bm(src, 0);
	src->ops->set_state(src, SRC_STATE_OFF);
	src->ops->commit_write(src);

	/* Also shut down the sample-rate-converting SRCs, if any. */
	if (NULL != apcm->srccs) {
		for (i = 0; i < apcm->n_srcc; i++) {
			src = apcm->srccs[i];
			src->ops->set_bm(src, 0);
			src->ops->set_state(src, SRC_STATE_OFF);
			src->ops->commit_write(src);
		}
	}

	apcm->started = 0;

	return 0;
}
440
/*
 * Report the playback position in bytes relative to the buffer start.
 * The hardware's current address leads consumed data by the initial
 * fill window (128 here == 0x80 in atc_pcm_playback_start()), so that
 * offset is subtracted modulo the buffer size.  Returns 0 when the
 * stream is not prepared.
 */
static int
atc_pcm_playback_position(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src *src = apcm->src;
	u32 size, max_cisz;
	int position;

	if (!src)
		return 0;
	position = src->ops->get_ca(src);

	size = apcm->vm_block->size;
	max_cisz = src->multi * src->rsc.msr;
	max_cisz = 128 * (max_cisz < 8 ? max_cisz : 8);

	return (position + size - max_cisz - apcm->vm_block->addr) % size;
}
458
/* Per-stage configuration for a converting SRC node used in capture. */
struct src_node_conf_t {
	unsigned int pitch;	/* 8.24 fixed-point rate-conversion ratio */
	unsigned int msr:8;	/* MSR for the SRC of this stage */
	unsigned int mix_msr:8;	/* MSR for the routing AMIXER */
	unsigned int imp_msr:8;	/* MSR for the SRCIMP */
	unsigned int vo:1;	/* value passed to src->ops->set_vo() */
};
466
/*
 * Decide how many converting SRCs a capture stream needs and fill one
 * configuration entry per conversion stage.  With msr == 1 a single
 * stage per channel suffices; with msr == 2 a pitch above 8.0 (0x8000000
 * in 8.24) needs a two-stage conversion per channel.  *n_srcc stays 0
 * when no conversion stage is required.
 */
static void setup_src_node_conf(struct ct_atc *atc, struct ct_atc_pcm *apcm,
				struct src_node_conf_t *conf, int *n_srcc)
{
	unsigned int pitch;

	/* get pitch and convert to fixed-point 8.24 format. */
	pitch = atc_get_pitch((atc->rsr * atc->msr),
			      apcm->substream->runtime->rate);
	*n_srcc = 0;

	if (1 == atc->msr) {
		*n_srcc = apcm->substream->runtime->channels;
		conf[0].pitch = pitch;
		conf[0].mix_msr = conf[0].imp_msr = conf[0].msr = 1;
		conf[0].vo = 1;
	} else if (2 == atc->msr) {
		if (0x8000000 < pitch) {
			/* Need two-stage SRCs, SRCIMPs and
			 * AMIXERs for converting format */
			conf[0].pitch = (atc->msr << 24);
			conf[0].msr = conf[0].mix_msr = 1;
			conf[0].imp_msr = atc->msr;
			conf[0].vo = 0;
			conf[1].pitch = atc_get_pitch(atc->rsr,
					apcm->substream->runtime->rate);
			conf[1].msr = conf[1].mix_msr = conf[1].imp_msr = 1;
			conf[1].vo = 1;
			*n_srcc = apcm->substream->runtime->channels * 2;
		} else if (0x1000000 < pitch) {
			/* Need one-stage SRCs, SRCIMPs and
			 * AMIXERs for converting format */
			conf[0].pitch = pitch;
			conf[0].msr = conf[0].mix_msr
						= conf[0].imp_msr = atc->msr;
			conf[0].vo = 1;
			*n_srcc = apcm->substream->runtime->channels;
		}
	}
}
506
/*
 * Allocate every hardware resource a capture stream needs: converting
 * SRCs (counts per setup_src_node_conf()), routing AMIXERs, an
 * optional mono SUM, SRCIMPs, the memory-write SRC, and the device
 * buffer mapping.  On any failure everything grabbed so far is
 * released and a negative error is returned.
 */
static int
atc_pcm_capture_get_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src_mgr *src_mgr = atc->rsc_mgrs[SRC];
	struct srcimp_mgr *srcimp_mgr = atc->rsc_mgrs[SRCIMP];
	struct amixer_mgr *amixer_mgr = atc->rsc_mgrs[AMIXER];
	struct sum_mgr *sum_mgr = atc->rsc_mgrs[SUM];
	struct src_desc src_dsc = {0};
	struct src *src;
	struct srcimp_desc srcimp_dsc = {0};
	struct srcimp *srcimp;
	struct amixer_desc mix_dsc = {0};
	struct sum_desc sum_dsc = {0};
	unsigned int pitch;
	int multi, err, i;
	int n_srcimp, n_amixer, n_srcc, n_sum;
	struct src_node_conf_t src_node_conf[2] = {{0} };

	/* first release old resources */
	atc_pcm_release_resources(atc, apcm);

	/* The numbers of converting SRCs and SRCIMPs should be determined
	 * by pitch value. */

	multi = apcm->substream->runtime->channels;

	/* get pitch and convert to fixed-point 8.24 format. */
	pitch = atc_get_pitch((atc->rsr * atc->msr),
			      apcm->substream->runtime->rate);

	setup_src_node_conf(atc, apcm, src_node_conf, &n_srcc);
	/* A mono stream additionally needs a SUM and two AMIXERs feeding
	 * it to fold left and right into one channel. */
	n_sum = (1 == multi) ? 1 : 0;
	n_amixer = n_sum * 2 + n_srcc;
	n_srcimp = n_srcc;
	if ((multi > 1) && (0x8000000 >= pitch)) {
		/* Need extra AMIXERs and SRCIMPs for special treatment
		 * of interleaved recording of conjugate channels */
		n_amixer += multi * atc->msr;
		n_srcimp += multi * atc->msr;
	} else {
		n_srcimp += multi;
	}

	if (n_srcc) {
		apcm->srccs = kzalloc(sizeof(void *)*n_srcc, GFP_KERNEL);
		if (NULL == apcm->srccs)
			return -ENOMEM;
	}
	if (n_amixer) {
		apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
		if (NULL == apcm->amixers) {
			err = -ENOMEM;
			goto error1;
		}
	}
	apcm->srcimps = kzalloc(sizeof(void *)*n_srcimp, GFP_KERNEL);
	if (NULL == apcm->srcimps) {
		err = -ENOMEM;
		goto error1;
	}

	/* Allocate SRCs for sample rate conversion if needed */
	src_dsc.multi = 1;
	src_dsc.mode = ARCRW;
	for (i = 0, apcm->n_srcc = 0; i < n_srcc; i++) {
		/* i/multi selects the conversion stage this SRC belongs to */
		src_dsc.msr = src_node_conf[i/multi].msr;
		err = src_mgr->get_src(src_mgr, &src_dsc,
					(struct src **)&apcm->srccs[i]);
		if (err)
			goto error1;

		src = apcm->srccs[i];
		pitch = src_node_conf[i/multi].pitch;
		src->ops->set_pitch(src, pitch);
		src->ops->set_rom(src, select_rom(pitch));
		src->ops->set_vo(src, src_node_conf[i/multi].vo);

		apcm->n_srcc++;
	}

	/* Allocate AMIXERs for routing SRCs of conversion if needed */
	for (i = 0, apcm->n_amixer = 0; i < n_amixer; i++) {
		/* Layout: [0, n_sum*2) feed the mono SUM, then one per
		 * converting SRC, then the interleave extras. */
		if (i < (n_sum*2))
			mix_dsc.msr = atc->msr;
		else if (i < (n_sum*2+n_srcc))
			mix_dsc.msr = src_node_conf[(i-n_sum*2)/multi].mix_msr;
		else
			mix_dsc.msr = 1;

		err = amixer_mgr->get_amixer(amixer_mgr, &mix_dsc,
					(struct amixer **)&apcm->amixers[i]);
		if (err)
			goto error1;

		apcm->n_amixer++;
	}

	/* Allocate a SUM resource to mix all input channels together */
	sum_dsc.msr = atc->msr;
	err = sum_mgr->get_sum(sum_mgr, &sum_dsc, (struct sum **)&apcm->mono);
	if (err)
		goto error1;

	pitch = atc_get_pitch((atc->rsr * atc->msr),
			      apcm->substream->runtime->rate);
	/* Allocate SRCIMP resources */
	for (i = 0, apcm->n_srcimp = 0; i < n_srcimp; i++) {
		if (i < (n_srcc))
			srcimp_dsc.msr = src_node_conf[i/multi].imp_msr;
		else if (1 == multi)
			srcimp_dsc.msr = (pitch <= 0x8000000) ? atc->msr : 1;
		else
			srcimp_dsc.msr = 1;

		err = srcimp_mgr->get_srcimp(srcimp_mgr, &srcimp_dsc, &srcimp);
		if (err)
			goto error1;

		apcm->srcimps[i] = srcimp;
		apcm->n_srcimp++;
	}

	/* Allocate a SRC for writing data to host memory */
	src_dsc.multi = apcm->substream->runtime->channels;
	src_dsc.msr = 1;
	src_dsc.mode = MEMWR;
	err = src_mgr->get_src(src_mgr, &src_dsc, (struct src **)&apcm->src);
	if (err)
		goto error1;

	src = apcm->src;
	src->ops->set_pitch(src, pitch);

	/* Set up device virtual mem map */
	err = ct_map_audio_buffer(atc, apcm);
	if (err < 0)
		goto error1;

	return 0;

error1:
	atc_pcm_release_resources(atc, apcm);
	return err;
}
651
/*
 * Wire up the capture graph allocated by
 * atc_pcm_capture_get_resources(): mixer output ports -> converting
 * SRC stages -> (optional mono SUM) -> SRCIMPs -> memory-write SRC.
 * Idempotent: returns 0 immediately when the stream already owns an
 * SRC.
 */
static int atc_pcm_capture_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src *src;
	struct amixer *amixer;
	struct srcimp *srcimp;
	struct ct_mixer *mixer = atc->mixer;
	struct sum *mono;
	struct rsc *out_ports[8] = {NULL};
	int err, i, j, n_sum, multi;
	unsigned int pitch;
	int mix_base = 0, imp_base = 0;

	if (NULL != apcm->src) {
		/* Prepared pcm capture */
		return 0;
	}

	/* Get needed resources. */
	err = atc_pcm_capture_get_resources(atc, apcm);
	if (err)
		return err;

	/* Connect resources */
	mixer->get_output_ports(mixer, MIX_PCMO_FRONT,
				&out_ports[0], &out_ports[1]);

	multi = apcm->substream->runtime->channels;
	if (1 == multi) {
		/* Mono: mix left and right into the SUM node at reduced
		 * scale and capture from that single port. */
		mono = apcm->mono;
		for (i = 0; i < 2; i++) {
			amixer = apcm->amixers[i];
			amixer->ops->setup(amixer, out_ports[i],
						MONO_SUM_SCALE, mono);
		}
		out_ports[0] = &mono->rsc;
		n_sum = 1;
		mix_base = n_sum * 2;
	}

	/* Chain each converting SRC stage behind its channel's port. */
	for (i = 0; i < apcm->n_srcc; i++) {
		src = apcm->srccs[i];
		srcimp = apcm->srcimps[imp_base+i];
		amixer = apcm->amixers[mix_base+i];
		srcimp->ops->map(srcimp, src, out_ports[i%multi]);
		amixer->ops->setup(amixer, &src->rsc, INIT_VOL, NULL);
		out_ports[i%multi] = &amixer->rsc;
	}

	pitch = atc_get_pitch((atc->rsr * atc->msr),
			      apcm->substream->runtime->rate);

	if ((multi > 1) && (pitch <= 0x8000000)) {
		/* Special connection for interleaved
		 * recording with conjugate channels */
		for (i = 0; i < multi; i++) {
			out_ports[i]->ops->master(out_ports[i]);
			for (j = 0; j < atc->msr; j++) {
				amixer = apcm->amixers[apcm->n_srcc+j*multi+i];
				amixer->ops->set_input(amixer, out_ports[i]);
				amixer->ops->set_scale(amixer, INIT_VOL);
				amixer->ops->set_sum(amixer, NULL);
				amixer->ops->commit_raw_write(amixer);
				out_ports[i]->ops->next_conj(out_ports[i]);

				srcimp = apcm->srcimps[apcm->n_srcc+j*multi+i];
				srcimp->ops->map(srcimp, apcm->src,
						 &amixer->rsc);
			}
		}
	} else {
		for (i = 0; i < multi; i++) {
			srcimp = apcm->srcimps[apcm->n_srcc+i];
			srcimp->ops->map(srcimp, apcm->src, out_ports[i]);
		}
	}

	ct_timer_prepare(apcm->timer);

	return 0;
}
732
/*
 * Start a prepared capture stream: configure each converting SRC and
 * the recording SRC, disable them all and commit, then re-enable them
 * so they start synchronously, and finally start the interrupt timer.
 * Idempotent while the stream is already running.
 */
static int atc_pcm_capture_start(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src *src;
	struct src_mgr *src_mgr = atc->rsc_mgrs[SRC];
	int i, multi;

	if (apcm->started)
		return 0;

	apcm->started = 1;
	multi = apcm->substream->runtime->channels;
	/* Set up converting SRCs */
	for (i = 0; i < apcm->n_srcc; i++) {
		src = apcm->srccs[i];
		/* pm set on every SRC except the last of each channel group */
		src->ops->set_pm(src, ((i%multi) != (multi-1)));
		src_mgr->src_disable(src_mgr, src);
	}

	/* Set up recording SRC */
	src = apcm->src;
	src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
	src->ops->set_sa(src, apcm->vm_block->addr);
	src->ops->set_la(src, apcm->vm_block->addr + apcm->vm_block->size);
	src->ops->set_ca(src, apcm->vm_block->addr);
	src_mgr->src_disable(src_mgr, src);

	/* Disable relevant SRCs firstly */
	src_mgr->commit_write(src_mgr);

	/* Enable SRCs respectively */
	for (i = 0; i < apcm->n_srcc; i++) {
		src = apcm->srccs[i];
		src->ops->set_state(src, SRC_STATE_RUN);
		src->ops->commit_write(src);
		src_mgr->src_enable_s(src_mgr, src);
	}
	src = apcm->src;
	src->ops->set_bm(src, 1);
	src->ops->set_state(src, SRC_STATE_RUN);
	src->ops->commit_write(src);
	src_mgr->src_enable_s(src_mgr, src);

	/* Enable relevant SRCs synchronously */
	src_mgr->commit_write(src_mgr);

	ct_timer_start(apcm->timer);
	return 0;
}
781
782static int
783atc_pcm_capture_position(struct ct_atc *atc, struct ct_atc_pcm *apcm)
784{
785 struct src *src = apcm->src;
786
787 if (!src)
788 return 0;
789 return src->ops->get_ca(src) - apcm->vm_block->addr;
790}
791
/*
 * Allocate resources for SPDIF passthrough playback: a memory-read
 * SRC whose MSR is doubled until pll_rate * msr covers the stream
 * rate, at least two AMIXERs, and the device buffer mapping.  On any
 * failure all partial allocations are released.
 */
static int spdif_passthru_playback_get_resources(struct ct_atc *atc,
						 struct ct_atc_pcm *apcm)
{
	struct src_mgr *src_mgr = atc->rsc_mgrs[SRC];
	struct amixer_mgr *amixer_mgr = atc->rsc_mgrs[AMIXER];
	struct src_desc desc = {0};
	struct amixer_desc mix_dsc = {0};
	struct src *src;
	int err;
	int n_amixer = apcm->substream->runtime->channels, i;
	unsigned int pitch, rsr = atc->pll_rate;

	/* first release old resources */
	atc_pcm_release_resources(atc, apcm);

	/* Get SRC resource */
	desc.multi = apcm->substream->runtime->channels;
	desc.msr = 1;
	while (apcm->substream->runtime->rate > (rsr * desc.msr))
		desc.msr <<= 1;

	desc.mode = MEMRD;
	err = src_mgr->get_src(src_mgr, &desc, (struct src **)&apcm->src);
	if (err)
		goto error1;

	pitch = atc_get_pitch(apcm->substream->runtime->rate, (rsr * desc.msr));
	src = apcm->src;
	src->ops->set_pitch(src, pitch);
	src->ops->set_rom(src, select_rom(pitch));
	src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
	src->ops->set_pm(src, (src->ops->next_interleave(src) != NULL));
	/* bp=1: presumably bypass processing for passthrough data —
	 * confirm against the SRC ops in cthardware. */
	src->ops->set_bp(src, 1);

	/* Get AMIXER resource */
	n_amixer = (n_amixer < 2) ? 2 : n_amixer;
	apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
	if (NULL == apcm->amixers) {
		err = -ENOMEM;
		goto error1;
	}
	mix_dsc.msr = desc.msr;
	for (i = 0, apcm->n_amixer = 0; i < n_amixer; i++) {
		err = amixer_mgr->get_amixer(amixer_mgr, &mix_dsc,
					(struct amixer **)&apcm->amixers[i]);
		if (err)
			goto error1;

		apcm->n_amixer++;
	}

	/* Set up device virtual mem map */
	err = ct_map_audio_buffer(atc, apcm);
	if (err < 0)
		goto error1;

	return 0;

error1:
	atc_pcm_release_resources(atc, apcm);
	return err;
}
854
855static int atc_pll_init(struct ct_atc *atc, int rate)
856{
857 struct hw *hw = atc->hw;
858 int err;
859 err = hw->pll_init(hw, rate);
860 atc->pll_rate = err ? 0 : rate;
861 return err;
862}
863
864static int
865spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm)
866{
867 struct dao *dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
868 unsigned long flags;
869 unsigned int rate = apcm->substream->runtime->rate;
870 unsigned int status;
871 int err;
872 unsigned char iec958_con_fs;
873
874 switch (rate) {
875 case 48000:
876 iec958_con_fs = IEC958_AES3_CON_FS_48000;
877 break;
878 case 44100:
879 iec958_con_fs = IEC958_AES3_CON_FS_44100;
880 break;
881 case 32000:
882 iec958_con_fs = IEC958_AES3_CON_FS_32000;
883 break;
884 default:
885 return -ENOENT;
886 }
887
888 spin_lock_irqsave(&atc->atc_lock, flags);
889 dao->ops->get_spos(dao, &status);
890 if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) {
891 status &= ((~IEC958_AES3_CON_FS) << 24);
892 status |= (iec958_con_fs << 24);
893 dao->ops->set_spos(dao, status);
894 dao->ops->commit_write(dao);
895 }
896 if ((rate != atc->pll_rate) && (32000 != rate))
897 err = atc_pll_init(atc, rate);
898 spin_unlock_irqrestore(&atc->atc_lock, flags);
899
900 return err;
901}
902
/* Prepare a PCM substream for S/PDIF passthrough playback: set up the
 * DAO/PLL, acquire SRC/AMIXER resources, route the SRC interleave chain
 * through the AMIXERs, and feed the first two AMIXERs into SPDIFOO.
 * Idempotent: returns 0 immediately if resources were already acquired. */
static int
spdif_passthru_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct src *src;
	struct amixer *amixer;
	struct dao *dao;
	int err;
	int i;
	unsigned long flags;

	if (NULL != apcm->src)
		return 0;

	/* Configure SPDIFOO and PLL to passthrough mode;
	 * determine pll_rate. */
	err = spdif_passthru_playback_setup(atc, apcm);
	if (err)
		return err;

	/* Get needed resources. */
	err = spdif_passthru_playback_get_resources(atc, apcm);
	if (err)
		return err;

	/* Connect resources */
	src = apcm->src;
	for (i = 0; i < apcm->n_amixer; i++) {
		amixer = apcm->amixers[i];
		amixer->ops->setup(amixer, &src->rsc, INIT_VOL, NULL);
		/* Walk the interleave chain; wrap back to the head so every
		 * AMIXER gets an input even if there are more AMIXERs than
		 * interleaved SRCs. */
		src = src->ops->next_interleave(src);
		if (NULL == src)
			src = apcm->src;
	}
	/* Connect to SPDIFOO */
	spin_lock_irqsave(&atc->atc_lock, flags);
	dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
	amixer = apcm->amixers[0];
	dao->ops->set_left_input(dao, &amixer->rsc);
	amixer = apcm->amixers[1];
	dao->ops->set_right_input(dao, &amixer->rsc);
	spin_unlock_irqrestore(&atc->atc_lock, flags);

	ct_timer_prepare(apcm->timer);

	return 0;
}
949
950static int atc_select_line_in(struct ct_atc *atc)
951{
952 struct hw *hw = atc->hw;
953 struct ct_mixer *mixer = atc->mixer;
954 struct src *src;
955
956 if (hw->is_adc_source_selected(hw, ADC_LINEIN))
957 return 0;
958
959 mixer->set_input_left(mixer, MIX_MIC_IN, NULL);
960 mixer->set_input_right(mixer, MIX_MIC_IN, NULL);
961
962 hw->select_adc_source(hw, ADC_LINEIN);
963
964 src = atc->srcs[2];
965 mixer->set_input_left(mixer, MIX_LINE_IN, &src->rsc);
966 src = atc->srcs[3];
967 mixer->set_input_right(mixer, MIX_LINE_IN, &src->rsc);
968
969 return 0;
970}
971
972static int atc_select_mic_in(struct ct_atc *atc)
973{
974 struct hw *hw = atc->hw;
975 struct ct_mixer *mixer = atc->mixer;
976 struct src *src;
977
978 if (hw->is_adc_source_selected(hw, ADC_MICIN))
979 return 0;
980
981 mixer->set_input_left(mixer, MIX_LINE_IN, NULL);
982 mixer->set_input_right(mixer, MIX_LINE_IN, NULL);
983
984 hw->select_adc_source(hw, ADC_MICIN);
985
986 src = atc->srcs[2];
987 mixer->set_input_left(mixer, MIX_MIC_IN, &src->rsc);
988 src = atc->srcs[3];
989 mixer->set_input_right(mixer, MIX_MIC_IN, &src->rsc);
990
991 return 0;
992}
993
994static int atc_have_digit_io_switch(struct ct_atc *atc)
995{
996 struct hw *hw = atc->hw;
997
998 return hw->have_digit_io_switch(hw);
999}
1000
1001static int atc_select_digit_io(struct ct_atc *atc)
1002{
1003 struct hw *hw = atc->hw;
1004
1005 if (hw->is_adc_source_selected(hw, ADC_NONE))
1006 return 0;
1007
1008 hw->select_adc_source(hw, ADC_NONE);
1009
1010 return 0;
1011}
1012
1013static int atc_daio_unmute(struct ct_atc *atc, unsigned char state, int type)
1014{
1015 struct daio_mgr *daio_mgr = atc->rsc_mgrs[DAIO];
1016
1017 if (state)
1018 daio_mgr->daio_enable(daio_mgr, atc->daios[type]);
1019 else
1020 daio_mgr->daio_disable(daio_mgr, atc->daios[type]);
1021
1022 daio_mgr->commit_write(daio_mgr);
1023
1024 return 0;
1025}
1026
/* Read the S/PDIF channel-status word of the DAO of the given @type. */
static int
atc_dao_get_status(struct ct_atc *atc, unsigned int *status, int type)
{
	struct dao *dao = container_of(atc->daios[type], struct dao, daio);
	return dao->ops->get_spos(dao, status);
}
1033
/* Write the S/PDIF channel-status word of the DAO of the given @type
 * and commit it to hardware. Always returns 0. */
static int
atc_dao_set_status(struct ct_atc *atc, unsigned int status, int type)
{
	struct dao *dao = container_of(atc->daios[type], struct dao, daio);

	dao->ops->set_spos(dao, status);
	dao->ops->commit_write(dao);
	return 0;
}
1043
/* Enable/disable the front line-out (LINEO1). */
static int atc_line_front_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, LINEO1);
}
1048
/* Enable/disable the surround line-out (LINEO4). */
static int atc_line_surround_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, LINEO4);
}
1053
/* Enable/disable the center/LFE line-out (LINEO3). */
static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, LINEO3);
}
1058
/* Enable/disable the rear line-out (LINEO2). */
static int atc_line_rear_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, LINEO2);
}
1063
/* Enable/disable the line-in DAIO (LINEIM). */
static int atc_line_in_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, LINEIM);
}
1068
/* Enable/disable the S/PDIF output (SPDIFOO). */
static int atc_spdif_out_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, SPDIFOO);
}
1073
/* Enable/disable the S/PDIF input (SPDIFIO). */
static int atc_spdif_in_unmute(struct ct_atc *atc, unsigned char state)
{
	return atc_daio_unmute(atc, state, SPDIFIO);
}
1078
/* Read the S/PDIF output channel-status word. */
static int atc_spdif_out_get_status(struct ct_atc *atc, unsigned int *status)
{
	return atc_dao_get_status(atc, status, SPDIFOO);
}
1083
/* Write the S/PDIF output channel-status word. */
static int atc_spdif_out_set_status(struct ct_atc *atc, unsigned int status)
{
	return atc_dao_set_status(atc, status, SPDIFOO);
}
1088
/* Toggle S/PDIF output passthrough mode.
 * Entering passthrough reinitializes the DAO with msr=1 and programs the
 * default consumer channel status; leaving it restores the mixer routing,
 * zeroes the channel status, and reprograms the PLL back to the reference
 * rate if needed. Returns the last failing operation's error code. */
static int atc_spdif_out_passthru(struct ct_atc *atc, unsigned char state)
{
	unsigned long flags;
	struct dao_desc da_dsc = {0};
	struct dao *dao;
	int err;
	struct ct_mixer *mixer = atc->mixer;
	struct rsc *rscs[2] = {NULL};
	unsigned int spos = 0;

	spin_lock_irqsave(&atc->atc_lock, flags);
	dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
	da_dsc.msr = state ? 1 : atc->msr;
	da_dsc.passthru = state ? 1 : 0;
	err = dao->ops->reinit(dao, &da_dsc);
	if (state) {
		spos = IEC958_DEFAULT_CON;
	} else {
		/* Reconnect the mixer's S/PDIF-out ports as DAO inputs. */
		mixer->get_output_ports(mixer, MIX_SPDIF_OUT,
					&rscs[0], &rscs[1]);
		dao->ops->set_left_input(dao, rscs[0]);
		dao->ops->set_right_input(dao, rscs[1]);
		/* Restore PLL to atc->rsr if needed. */
		if (atc->pll_rate != atc->rsr)
			err = atc_pll_init(atc, atc->rsr);
	}
	dao->ops->set_spos(dao, spos);
	dao->ops->commit_write(dao);
	spin_unlock_irqrestore(&atc->atc_lock, flags);

	return err;
}
1121
/* Tear down a ct_atc object and release everything it owns.
 * Safe to call on a partially-constructed object (every member is
 * NULL-checked), so it doubles as the error-path cleanup for
 * ct_atc_create(). Order matters: timer, then hardware stop, then
 * mixer, DAIOs, SUMs, SRCs, SRCIMPs, resource managers, hw object,
 * and finally the VM manager and the object itself. */
static int ct_atc_destroy(struct ct_atc *atc)
{
	struct daio_mgr *daio_mgr;
	struct dao *dao;
	struct dai *dai;
	struct daio *daio;
	struct sum_mgr *sum_mgr;
	struct src_mgr *src_mgr;
	struct srcimp_mgr *srcimp_mgr;
	struct srcimp *srcimp;
	struct ct_mixer *mixer;
	int i = 0;

	if (NULL == atc)
		return 0;

	if (atc->timer) {
		ct_timer_free(atc->timer);
		atc->timer = NULL;
	}

	/* Stop hardware and disable all interrupts */
	if (NULL != atc->hw)
		((struct hw *)atc->hw)->card_stop(atc->hw);

	/* Destroy internal mixer objects */
	if (NULL != atc->mixer) {
		mixer = atc->mixer;
		/* Detach every capture input before destroying the mixer. */
		mixer->set_input_left(mixer, MIX_LINE_IN, NULL);
		mixer->set_input_right(mixer, MIX_LINE_IN, NULL);
		mixer->set_input_left(mixer, MIX_MIC_IN, NULL);
		mixer->set_input_right(mixer, MIX_MIC_IN, NULL);
		mixer->set_input_left(mixer, MIX_SPDIF_IN, NULL);
		mixer->set_input_right(mixer, MIX_SPDIF_IN, NULL);
		ct_mixer_destroy(atc->mixer);
	}

	if (NULL != atc->daios) {
		daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
		for (i = 0; i < atc->n_daio; i++) {
			daio = atc->daios[i];
			/* Types below LINEIM are outputs (DAOs). */
			if (daio->type < LINEIM) {
				dao = container_of(daio, struct dao, daio);
				dao->ops->clear_left_input(dao);
				dao->ops->clear_right_input(dao);
			} else {
				dai = container_of(daio, struct dai, daio);
				/* some thing to do for dai ... */
			}
			daio_mgr->put_daio(daio_mgr, daio);
		}
		kfree(atc->daios);
	}

	if (NULL != atc->pcm) {
		sum_mgr = atc->rsc_mgrs[SUM];
		for (i = 0; i < atc->n_pcm; i++)
			sum_mgr->put_sum(sum_mgr, atc->pcm[i]);

		kfree(atc->pcm);
	}

	if (NULL != atc->srcs) {
		src_mgr = atc->rsc_mgrs[SRC];
		for (i = 0; i < atc->n_src; i++)
			src_mgr->put_src(src_mgr, atc->srcs[i]);

		kfree(atc->srcs);
	}

	if (NULL != atc->srcimps) {
		srcimp_mgr = atc->rsc_mgrs[SRCIMP];
		for (i = 0; i < atc->n_srcimp; i++) {
			srcimp = atc->srcimps[i];
			srcimp->ops->unmap(srcimp);
			srcimp_mgr->put_srcimp(srcimp_mgr, atc->srcimps[i]);
		}
		kfree(atc->srcimps);
	}

	/* Destroy the per-type resource managers themselves. */
	for (i = 0; i < NUM_RSCTYP; i++) {
		if ((NULL != rsc_mgr_funcs[i].destroy) &&
		    (NULL != atc->rsc_mgrs[i]))
			rsc_mgr_funcs[i].destroy(atc->rsc_mgrs[i]);

	}

	if (NULL != atc->hw)
		destroy_hw_obj((struct hw *)atc->hw);

	/* Destroy device virtual memory manager object */
	if (NULL != atc->vm) {
		ct_vm_destroy(atc->vm);
		atc->vm = NULL;
	}

	kfree(atc);

	return 0;
}
1222
1223static int atc_dev_free(struct snd_device *dev)
1224{
1225 struct ct_atc *atc = dev->device_data;
1226 return ct_atc_destroy(atc);
1227}
1228
1229static int __devinit atc_identify_card(struct ct_atc *atc)
1230{
1231 const struct snd_pci_quirk *p;
1232 const struct snd_pci_quirk *list;
1233
1234 switch (atc->chip_type) {
1235 case ATC20K1:
1236 atc->chip_name = "20K1";
1237 list = subsys_20k1_list;
1238 break;
1239 case ATC20K2:
1240 atc->chip_name = "20K2";
1241 list = subsys_20k2_list;
1242 break;
1243 default:
1244 return -ENOENT;
1245 }
1246 p = snd_pci_quirk_lookup(atc->pci, list);
1247 if (!p)
1248 return -ENOENT;
1249 atc->model = p->value;
1250 atc->model_name = ct_subsys_name[atc->model];
1251 snd_printd("ctxfi: chip %s model %s (%04x:%04x) is found\n",
1252 atc->chip_name, atc->model_name,
1253 atc->pci->subsystem_vendor,
1254 atc->pci->subsystem_device);
1255 return 0;
1256}
1257
1258int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc)
1259{
1260 enum CTALSADEVS i;
1261 int err;
1262
1263 alsa_dev_funcs[MIXER].public_name = atc->chip_name;
1264
1265 for (i = 0; i < NUM_CTALSADEVS; i++) {
1266 if (NULL == alsa_dev_funcs[i].create)
1267 continue;
1268
1269 err = alsa_dev_funcs[i].create(atc, i,
1270 alsa_dev_funcs[i].public_name);
1271 if (err) {
1272 printk(KERN_ERR "ctxfi: "
1273 "Creating alsa device %d failed!\n", i);
1274 return err;
1275 }
1276 }
1277
1278 return 0;
1279}
1280
/* Create the chip-specific hw object, initialize the card hardware,
 * and instantiate all resource managers. On failure, any objects
 * already created remain attached to @atc and are freed by
 * ct_atc_destroy() in the caller's error path. */
static int __devinit atc_create_hw_devs(struct ct_atc *atc)
{
	struct hw *hw;
	struct card_conf info = {0};
	int i, err;

	err = create_hw_obj(atc->pci, atc->chip_type, atc->model, &hw);
	if (err) {
		printk(KERN_ERR "Failed to create hw obj!!!\n");
		return err;
	}
	atc->hw = hw;

	/* Initialize card hardware. */
	info.rsr = atc->rsr;
	info.msr = atc->msr;
	info.vm_pgt_phys = atc_get_ptp_phys(atc, 0);
	err = hw->card_init(hw, &info);
	if (err < 0)
		return err;

	/* One manager per resource type that defines a creator. */
	for (i = 0; i < NUM_RSCTYP; i++) {
		if (NULL == rsc_mgr_funcs[i].create)
			continue;

		err = rsc_mgr_funcs[i].create(atc->hw, &atc->rsc_mgrs[i]);
		if (err) {
			printk(KERN_ERR "ctxfi: "
			       "Failed to create rsc_mgr %d!!!\n", i);
			return err;
		}
	}

	return 0;
}
1316
/* Acquire all static chip resources: DAIOs, SRCs, SRCIMPs, PCM SUMs and
 * the mixer. Partial allocations are left on @atc for ct_atc_destroy()
 * to release, so every error path simply returns the error code. */
static int __devinit atc_get_resources(struct ct_atc *atc)
{
	struct daio_desc da_desc = {0};
	struct daio_mgr *daio_mgr;
	struct src_desc src_dsc = {0};
	struct src_mgr *src_mgr;
	struct srcimp_desc srcimp_dsc = {0};
	struct srcimp_mgr *srcimp_mgr;
	struct sum_desc sum_dsc = {0};
	struct sum_mgr *sum_mgr;
	int err, i;

	atc->daios = kzalloc(sizeof(void *)*(DAIONUM), GFP_KERNEL);
	if (NULL == atc->daios)
		return -ENOMEM;

	/* 2 channels x 2 inputs (S/PDIF-in and line/mic-in). */
	atc->srcs = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
	if (NULL == atc->srcs)
		return -ENOMEM;

	atc->srcimps = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
	if (NULL == atc->srcimps)
		return -ENOMEM;

	/* 2 channels x 4 PCM mixer inputs. */
	atc->pcm = kzalloc(sizeof(void *)*(2*4), GFP_KERNEL);
	if (NULL == atc->pcm)
		return -ENOMEM;

	/* All DAIOs except the last (the S/PDIF input) use type == index. */
	daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
	da_desc.msr = atc->msr;
	for (i = 0, atc->n_daio = 0; i < DAIONUM-1; i++) {
		da_desc.type = i;
		err = daio_mgr->get_daio(daio_mgr, &da_desc,
					 (struct daio **)&atc->daios[i]);
		if (err) {
			printk(KERN_ERR "ctxfi: Failed to get DAIO "
			       "resource %d!!!\n", i);
			return err;
		}
		atc->n_daio++;
	}
	/* The S/PDIF input slot (i == DAIONUM-1 after the loop): the
	 * CTSB073X model uses the alternate SPDIFI1 input. */
	if (atc->model == CTSB073X)
		da_desc.type = SPDIFI1;
	else
		da_desc.type = SPDIFIO;
	err = daio_mgr->get_daio(daio_mgr, &da_desc,
				 (struct daio **)&atc->daios[i]);
	if (err) {
		printk(KERN_ERR "ctxfi: Failed to get S/PDIF-in resource!!!\n");
		return err;
	}
	atc->n_daio++;

	src_mgr = atc->rsc_mgrs[SRC];
	src_dsc.multi = 1;
	src_dsc.msr = atc->msr;
	src_dsc.mode = ARCRW;
	for (i = 0, atc->n_src = 0; i < (2*2); i++) {
		err = src_mgr->get_src(src_mgr, &src_dsc,
				       (struct src **)&atc->srcs[i]);
		if (err)
			return err;

		atc->n_src++;
	}

	srcimp_mgr = atc->rsc_mgrs[SRCIMP];
	srcimp_dsc.msr = 8; /* SRCIMPs for S/PDIFIn SRT */
	for (i = 0, atc->n_srcimp = 0; i < (2*1); i++) {
		err = srcimp_mgr->get_srcimp(srcimp_mgr, &srcimp_dsc,
					(struct srcimp **)&atc->srcimps[i]);
		if (err)
			return err;

		atc->n_srcimp++;
	}
	srcimp_dsc.msr = 8; /* SRCIMPs for LINE/MICIn SRT */
	for (i = 0; i < (2*1); i++) {
		err = srcimp_mgr->get_srcimp(srcimp_mgr, &srcimp_dsc,
				(struct srcimp **)&atc->srcimps[2*1+i]);
		if (err)
			return err;

		atc->n_srcimp++;
	}

	sum_mgr = atc->rsc_mgrs[SUM];
	sum_dsc.msr = atc->msr;
	for (i = 0, atc->n_pcm = 0; i < (2*4); i++) {
		err = sum_mgr->get_sum(sum_mgr, &sum_dsc,
				       (struct sum **)&atc->pcm[i]);
		if (err)
			return err;

		atc->n_pcm++;
	}

	err = ct_mixer_create(atc, (struct ct_mixer **)&atc->mixer);
	if (err) {
		printk(KERN_ERR "ctxfi: Failed to create mixer obj!!!\n");
		return err;
	}

	return 0;
}
1422
/* Wire a DAI (audio input) to a left/right SRC pair through SRCIMPs:
 * map each SRCIMP between its SRC and the DAI channel, then start both
 * SRCs and enable the DAI's SRC/SRT paths. The two srcs/srcimps arrays
 * must each hold at least two entries (left at [0], right at [1]). */
static void __devinit
atc_connect_dai(struct src_mgr *src_mgr, struct dai *dai,
		struct src **srcs, struct srcimp **srcimps)
{
	struct rsc *rscs[2] = {NULL};
	struct src *src;
	struct srcimp *srcimp;
	int i = 0;

	rscs[0] = &dai->daio.rscl;
	rscs[1] = &dai->daio.rscr;
	for (i = 0; i < 2; i++) {
		src = srcs[i];
		srcimp = srcimps[i];
		srcimp->ops->map(srcimp, src, rscs[i]);
		src_mgr->src_disable(src_mgr, src);
	}

	src_mgr->commit_write(src_mgr); /* Actually disable SRCs */

	src = srcs[0];
	src->ops->set_pm(src, 1);
	for (i = 0; i < 2; i++) {
		src = srcs[i];
		src->ops->set_state(src, SRC_STATE_RUN);
		src->ops->commit_write(src);
		src_mgr->src_enable_s(src_mgr, src);
	}

	dai->ops->set_srt_srcl(dai, &(srcs[0]->rsc));
	dai->ops->set_srt_srcr(dai, &(srcs[1]->rsc));

	dai->ops->set_enb_src(dai, 1);
	dai->ops->set_enb_srt(dai, 1);
	dai->ops->commit_write(dai);

	src_mgr->commit_write(src_mgr); /* Synchronously enable SRCs */
}
1461
/* Build the static signal topology: mixer outputs -> line/S/PDIF DAOs,
 * line-in and S/PDIF-in DAIs -> SRCs -> mixer capture inputs, and PCM
 * SUM pairs -> mixer PCM inputs. Relies on MIX_* and LINEO* enums being
 * parallel so the two loops can advance both indices together. */
static void __devinit atc_connect_resources(struct ct_atc *atc)
{
	struct dai *dai;
	struct dao *dao;
	struct src *src;
	struct sum *sum;
	struct ct_mixer *mixer;
	struct rsc *rscs[2] = {NULL};
	int i, j;

	mixer = atc->mixer;

	/* Each mixer output port pair feeds the matching DAO. */
	for (i = MIX_WAVE_FRONT, j = LINEO1; i <= MIX_SPDIF_OUT; i++, j++) {
		mixer->get_output_ports(mixer, i, &rscs[0], &rscs[1]);
		dao = container_of(atc->daios[j], struct dao, daio);
		dao->ops->set_left_input(dao, rscs[0]);
		dao->ops->set_right_input(dao, rscs[1]);
	}

	/* Line/mic input: SRCs 2/3 with SRCIMPs 2/3. */
	dai = container_of(atc->daios[LINEIM], struct dai, daio);
	atc_connect_dai(atc->rsc_mgrs[SRC], dai,
			(struct src **)&atc->srcs[2],
			(struct srcimp **)&atc->srcimps[2]);
	src = atc->srcs[2];
	mixer->set_input_left(mixer, MIX_LINE_IN, &src->rsc);
	src = atc->srcs[3];
	mixer->set_input_right(mixer, MIX_LINE_IN, &src->rsc);

	/* S/PDIF input: SRCs 0/1 with SRCIMPs 0/1. */
	dai = container_of(atc->daios[SPDIFIO], struct dai, daio);
	atc_connect_dai(atc->rsc_mgrs[SRC], dai,
			(struct src **)&atc->srcs[0],
			(struct srcimp **)&atc->srcimps[0]);

	src = atc->srcs[0];
	mixer->set_input_left(mixer, MIX_SPDIF_IN, &src->rsc);
	src = atc->srcs[1];
	mixer->set_input_right(mixer, MIX_SPDIF_IN, &src->rsc);

	/* PCM SUMs come in left/right pairs per mixer PCM input. */
	for (i = MIX_PCMI_FRONT, j = 0; i <= MIX_PCMI_SURROUND; i++, j += 2) {
		sum = atc->pcm[j];
		mixer->set_input_left(mixer, i, &sum->rsc);
		sum = atc->pcm[j+1];
		mixer->set_input_right(mixer, i, &sum->rsc);
	}
}
1507
/* Template copied into every new ct_atc by ct_atc_create(); binds the
 * object's function-pointer interface to the implementations above. */
static struct ct_atc atc_preset __devinitdata = {
	.map_audio_buffer = ct_map_audio_buffer,
	.unmap_audio_buffer = ct_unmap_audio_buffer,
	.pcm_playback_prepare = atc_pcm_playback_prepare,
	.pcm_release_resources = atc_pcm_release_resources,
	.pcm_playback_start = atc_pcm_playback_start,
	.pcm_playback_stop = atc_pcm_stop,
	.pcm_playback_position = atc_pcm_playback_position,
	.pcm_capture_prepare = atc_pcm_capture_prepare,
	.pcm_capture_start = atc_pcm_capture_start,
	.pcm_capture_stop = atc_pcm_stop,
	.pcm_capture_position = atc_pcm_capture_position,
	.spdif_passthru_playback_prepare = spdif_passthru_playback_prepare,
	.get_ptp_phys = atc_get_ptp_phys,
	.select_line_in = atc_select_line_in,
	.select_mic_in = atc_select_mic_in,
	.select_digit_io = atc_select_digit_io,
	.line_front_unmute = atc_line_front_unmute,
	.line_surround_unmute = atc_line_surround_unmute,
	.line_clfe_unmute = atc_line_clfe_unmute,
	.line_rear_unmute = atc_line_rear_unmute,
	.line_in_unmute = atc_line_in_unmute,
	.spdif_out_unmute = atc_spdif_out_unmute,
	.spdif_in_unmute = atc_spdif_in_unmute,
	.spdif_out_get_status = atc_spdif_out_get_status,
	.spdif_out_set_status = atc_spdif_out_set_status,
	.spdif_out_passthru = atc_spdif_out_passthru,
	.have_digit_io_switch = atc_have_digit_io_switch,
};
1537
1538/**
1539 * ct_atc_create - create and initialize a hardware manager
1540 * @card: corresponding alsa card object
1541 * @pci: corresponding kernel pci device object
1542 * @ratc: return created object address in it
1543 *
1544 * Creates and initializes a hardware manager.
1545 *
1546 * Creates kmallocated ct_atc structure. Initializes hardware.
 * Returns 0 if it succeeds, or a negative error code if it fails.
1548 */
1549
1550int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
1551 unsigned int rsr, unsigned int msr,
1552 int chip_type, struct ct_atc **ratc)
1553{
1554 struct ct_atc *atc;
1555 static struct snd_device_ops ops = {
1556 .dev_free = atc_dev_free,
1557 };
1558 int err;
1559
1560 *ratc = NULL;
1561
1562 atc = kzalloc(sizeof(*atc), GFP_KERNEL);
1563 if (NULL == atc)
1564 return -ENOMEM;
1565
1566 /* Set operations */
1567 *atc = atc_preset;
1568
1569 atc->card = card;
1570 atc->pci = pci;
1571 atc->rsr = rsr;
1572 atc->msr = msr;
1573 atc->chip_type = chip_type;
1574
1575 spin_lock_init(&atc->atc_lock);
1576
1577 /* Find card model */
1578 err = atc_identify_card(atc);
1579 if (err < 0) {
1580 printk(KERN_ERR "ctatc: Card not recognised\n");
1581 goto error1;
1582 }
1583
1584 /* Set up device virtual memory management object */
1585 err = ct_vm_create(&atc->vm);
1586 if (err < 0)
1587 goto error1;
1588
1589 /* Create all atc hw devices */
1590 err = atc_create_hw_devs(atc);
1591 if (err < 0)
1592 goto error1;
1593
1594 /* Get resources */
1595 err = atc_get_resources(atc);
1596 if (err < 0)
1597 goto error1;
1598
1599 /* Build topology */
1600 atc_connect_resources(atc);
1601
1602 atc->timer = ct_timer_new(atc);
1603 if (!atc->timer)
1604 goto error1;
1605
1606 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops);
1607 if (err < 0)
1608 goto error1;
1609
1610 snd_card_set_dev(card, &pci->dev);
1611
1612 *ratc = atc;
1613 return 0;
1614
1615error1:
1616 ct_atc_destroy(atc);
1617 printk(KERN_ERR "ctxfi: Something wrong!!!\n");
1618 return err;
1619}
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
new file mode 100644
index 000000000000..a03347232e84
--- /dev/null
+++ b/sound/pci/ctxfi/ctatc.h
@@ -0,0 +1,147 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctatc.h
9 *
10 * @Brief
11 * This file contains the definition of the device resource management object.
12 *
13 * @Author Liu Chun
14 * @Date Mar 28 2008
15 *
16 */
17
18#ifndef CTATC_H
19#define CTATC_H
20
21#include <linux/types.h>
22#include <linux/spinlock_types.h>
23#include <linux/pci.h>
24#include <linux/timer.h>
25#include <sound/core.h>
26
27#include "ctvmem.h"
28#include "ctresource.h"
29
/* Indices of the ALSA devices this driver creates; presumably used to
 * index the alsa_dev_funcs creator table — confirm against ctatc.c. */
enum CTALSADEVS {		/* Types of alsa devices */
	FRONT,
	SURROUND,
	CLFE,
	SIDE,
	IEC958,
	MIXER,
	NUM_CTALSADEVS		/* This should always be the last */
};
39
/* PCI subsystem id -> model-name mapping entry.
 * NOTE(review): not referenced in the visible code (model lookup uses
 * snd_pci_quirk lists); possibly vestigial — confirm before relying on it. */
struct ct_atc_chip_sub_details {
	u16 subsys;
	const char *nm_model;
};
44
/* Per-chip identification record with its subsystem table.
 * NOTE(review): not referenced in the visible code — confirm it is still
 * used elsewhere before extending it. */
struct ct_atc_chip_details {
	u16 vendor;
	u16 device;
	const struct ct_atc_chip_sub_details *sub_details;
	const char *nm_card;
};
51
52struct ct_atc;
53struct ct_timer;
54struct ct_timer_instance;
55
/* alsa pcm stream descriptor */
struct ct_atc_pcm {
	struct snd_pcm_substream *substream;
	/* Called from the period timer to signal stream progress. */
	void (*interrupt)(struct ct_atc_pcm *apcm);
	struct ct_timer_instance *timer;
	unsigned int started:1;	/* stream has been started */

	/* Only mono and interleaved modes are supported now. */
	struct ct_vm_block *vm_block;
	void *src;		/* SRC for interacting with host memory */
	void **srccs;		/* SRCs for sample rate conversion */
	void **srcimps;		/* SRC Input Mappers */
	void **amixers;		/* AMIXERs for routing converted data */
	void *mono;		/* A SUM resource for mixing chs to one */
	unsigned char n_srcc;	/* Number of converting SRCs */
	unsigned char n_srcimp;	/* Number of SRC Input Mappers */
	unsigned char n_amixer;	/* Number of AMIXERs */
};
74
/* Chip resource management object */
struct ct_atc {
	struct pci_dev *pci;
	struct snd_card *card;
	unsigned int rsr;	/* reference sample rate in Hz */
	unsigned int msr;	/* master sample rate in rsr */
	unsigned int pll_rate;	/* current rate of Phase Lock Loop */

	int chip_type;		/* ATC20K1 or ATC20K2 */
	int model;
	const char *chip_name;
	const char *model_name;

	struct ct_vm *vm;	/* device virtual memory manager for this card */
	int (*map_audio_buffer)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	void (*unmap_audio_buffer)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	unsigned long (*get_ptp_phys)(struct ct_atc *atc, int index);

	/* Serializes DAO/PLL reconfiguration (see ctatc.c users). */
	spinlock_t atc_lock;

	/* PCM stream operations, bound from atc_preset in ctatc.c. */
	int (*pcm_playback_prepare)(struct ct_atc *atc,
				    struct ct_atc_pcm *apcm);
	int (*pcm_playback_start)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	int (*pcm_playback_stop)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	int (*pcm_playback_position)(struct ct_atc *atc,
				     struct ct_atc_pcm *apcm);
	int (*spdif_passthru_playback_prepare)(struct ct_atc *atc,
					       struct ct_atc_pcm *apcm);
	int (*pcm_capture_prepare)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	int (*pcm_capture_start)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	int (*pcm_capture_stop)(struct ct_atc *atc, struct ct_atc_pcm *apcm);
	int (*pcm_capture_position)(struct ct_atc *atc,
				    struct ct_atc_pcm *apcm);
	int (*pcm_release_resources)(struct ct_atc *atc,
				     struct ct_atc_pcm *apcm);
	/* Mixer/routing control operations. */
	int (*select_line_in)(struct ct_atc *atc);
	int (*select_mic_in)(struct ct_atc *atc);
	int (*select_digit_io)(struct ct_atc *atc);
	int (*line_front_unmute)(struct ct_atc *atc, unsigned char state);
	int (*line_surround_unmute)(struct ct_atc *atc, unsigned char state);
	int (*line_clfe_unmute)(struct ct_atc *atc, unsigned char state);
	int (*line_rear_unmute)(struct ct_atc *atc, unsigned char state);
	int (*line_in_unmute)(struct ct_atc *atc, unsigned char state);
	int (*spdif_out_unmute)(struct ct_atc *atc, unsigned char state);
	int (*spdif_in_unmute)(struct ct_atc *atc, unsigned char state);
	int (*spdif_out_get_status)(struct ct_atc *atc, unsigned int *status);
	int (*spdif_out_set_status)(struct ct_atc *atc, unsigned int status);
	int (*spdif_out_passthru)(struct ct_atc *atc, unsigned char state);
	int (*have_digit_io_switch)(struct ct_atc *atc);

	/* Don't touch! Used for internal object. */
	void *rsc_mgrs[NUM_RSCTYP]; /* chip resource managers */
	void *mixer;	/* internal mixer object */
	void *hw;	/* chip specific hardware access object */
	void **daios;	/* digital audio io resources */
	void **pcm;	/* SUMs for collecting all pcm stream */
	void **srcs;	/* Sample Rate Converters for input signal */
	void **srcimps;	/* input mappers for SRCs */
	unsigned char n_daio;
	unsigned char n_src;
	unsigned char n_srcimp;
	unsigned char n_pcm;

	struct ct_timer *timer;
};
140
141
142int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
143 unsigned int rsr, unsigned int msr, int chip_type,
144 struct ct_atc **ratc);
145int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc);
146
147#endif /* CTATC_H */
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
new file mode 100644
index 000000000000..082e35c08c02
--- /dev/null
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -0,0 +1,769 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctdaio.c
9 *
10 * @Brief
11 * This file contains the implementation of Digital Audio Input Output
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 23 2008
16 *
17 */
18
19#include "ctdaio.h"
20#include "cthardware.h"
21#include "ctimap.h"
22#include <linux/slab.h>
23#include <linux/kernel.h>
24
25#define DAIO_RESOURCE_NUM NUM_DAIOTYP
26#define DAIO_OUT_MAX SPDIFOO
27
/* Allocation bitmap for DAIO resources: one flag per DAIO type, with
 * 'data' providing whole-word access to all flags at once. */
union daio_usage {
	struct {
		unsigned short lineo1:1;
		unsigned short lineo2:1;
		unsigned short lineo3:1;
		unsigned short lineo4:1;
		unsigned short spdifoo:1;
		unsigned short lineim:1;
		unsigned short spdifio:1;
		unsigned short spdifi1:1;
	} bf;
	unsigned short data;
};
41
/* Hardware resource indices for a DAIO's left and right channels. */
struct daio_rsc_idx {
	unsigned short left;
	unsigned short right;
};
46
/* Per-DAIO left/right resource indices on 20K1 chips. */
struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
	[LINEO1] = {.left = 0x00, .right = 0x01},
	[LINEO2] = {.left = 0x18, .right = 0x19},
	[LINEO3] = {.left = 0x08, .right = 0x09},
	[LINEO4] = {.left = 0x10, .right = 0x11},
	[LINEIM] = {.left = 0x1b5, .right = 0x1bd},
	[SPDIFOO] = {.left = 0x20, .right = 0x21},
	[SPDIFIO] = {.left = 0x15, .right = 0x1d},
	[SPDIFI1] = {.left = 0x95, .right = 0x9d},
};
57
/* Per-DAIO left/right resource indices on 20K2 chips.
 * Note: no SPDIFI1 entry — that type exists only on 20K1. */
struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
	[LINEO1] = {.left = 0x40, .right = 0x41},
	[LINEO2] = {.left = 0x70, .right = 0x71},
	[LINEO3] = {.left = 0x50, .right = 0x51},
	[LINEO4] = {.left = 0x60, .right = 0x61},
	[LINEIM] = {.left = 0x45, .right = 0xc5},
	[SPDIFOO] = {.left = 0x00, .right = 0x01},
	[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
67
/* Reset the conjugate cursor to the master index and return it. */
static int daio_master(struct rsc *rsc)
{
	/* Actually, this is not the resource index of DAIO.
	 * For DAO, it is the input mapper index. And, for DAI,
	 * it is the output time-slot index. */
	return rsc->conj = rsc->idx;
}
75
/* Return the current conjugate index (input-mapper or time-slot index). */
static int daio_index(const struct rsc *rsc)
{
	return rsc->conj;
}
80
/* Advance an output DAIO to its next conjugate index (stride 2). */
static int daio_out_next_conj(struct rsc *rsc)
{
	return rsc->conj += 2;
}
85
/* Advance an input DAIO to its next conjugate index on 20K1 (stride 0x200). */
static int daio_in_next_conj_20k1(struct rsc *rsc)
{
	return rsc->conj += 0x200;
}
90
/* Advance an input DAIO to its next conjugate index on 20K2 (stride 0x100). */
static int daio_in_next_conj_20k2(struct rsc *rsc)
{
	return rsc->conj += 0x100;
}
95
/* rsc_ops for output DAIOs: conjugate value is an index, not a slot. */
static struct rsc_ops daio_out_rsc_ops = {
	.master		= daio_master,
	.next_conj	= daio_out_next_conj,
	.index		= daio_index,
	.output_slot	= NULL,
};
102
/* rsc_ops for 20K1 input DAIOs: conjugate value is an output time slot. */
static struct rsc_ops daio_in_rsc_ops_20k1 = {
	.master		= daio_master,
	.next_conj	= daio_in_next_conj_20k1,
	.index		= NULL,
	.output_slot	= daio_index,
};
109
/* rsc_ops for 20K2 input DAIOs: conjugate value is an output time slot. */
static struct rsc_ops daio_in_rsc_ops_20k2 = {
	.master		= daio_master,
	.next_conj	= daio_in_next_conj_20k2,
	.index		= NULL,
	.output_slot	= daio_index,
};
116
/* Map a DAIO type to its per-chip hardware device index.
 * NOTE(review): the return type is unsigned int but the error paths
 * return -EINVAL, which callers would see as a huge positive index;
 * consider changing the return type to int and checking at call sites. */
static unsigned int daio_device_index(enum DAIOTYP type, struct hw *hw)
{
	switch (hw->chip_type) {
	case ATC20K1:
		switch (type) {
		case SPDIFOO:	return 0;
		case SPDIFIO:	return 0;
		case SPDIFI1:	return 1;
		case LINEO1:	return 4;
		case LINEO2:	return 7;
		case LINEO3:	return 5;
		case LINEO4:	return 6;
		case LINEIM:	return 7;
		default:	return -EINVAL;
		}
	case ATC20K2:
		switch (type) {
		case SPDIFOO:	return 0;
		case SPDIFIO:	return 0;
		case LINEO1:	return 4;
		case LINEO2:	return 7;
		case LINEO3:	return 5;
		case LINEO4:	return 6;
		case LINEIM:	return 4;
		default:	return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}
147
148static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc);
149
/* Read the S/PDIF channel-status word from the DAO control block. */
static int dao_spdif_get_spos(struct dao *dao, unsigned int *spos)
{
	((struct hw *)dao->hw)->dao_get_spos(dao->ctrl_blk, spos);
	return 0;
}
155
/* Stage the S/PDIF channel-status word in the DAO control block
 * (takes effect on the next commit_write). */
static int dao_spdif_set_spos(struct dao *dao, unsigned int spos)
{
	((struct hw *)dao->hw)->dao_set_spos(dao->ctrl_blk, spos);
	return 0;
}
161
/* Flush the staged control-block settings to the DAO's hardware device. */
static int dao_commit_write(struct dao *dao)
{
	((struct hw *)dao->hw)->dao_commit_write(dao->hw,
		daio_device_index(dao->daio.type, dao->hw), dao->ctrl_blk);
	return 0;
}
168
/* Connect @input as the DAO's left channel source by creating one input
 * mapper entry per master-sample-rate step. The msr entries are allocated
 * as a single array; only imappers[0] (its base pointer) is kfree'd later
 * by dao_clear_left_input().
 * NOTE(review): imap_add() return values are ignored — confirm they
 * cannot fail here. */
static int dao_set_left_input(struct dao *dao, struct rsc *input)
{
	struct imapper *entry;
	struct daio *daio = &dao->daio;
	int i;

	entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL);
	if (NULL == entry)
		return -ENOMEM;

	/* Program master and conjugate resources */
	input->ops->master(input);
	daio->rscl.ops->master(&daio->rscl);
	for (i = 0; i < daio->rscl.msr; i++, entry++) {
		entry->slot = input->ops->output_slot(input);
		entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl);
		dao->mgr->imap_add(dao->mgr, entry);
		dao->imappers[i] = entry;

		input->ops->next_conj(input);
		daio->rscl.ops->next_conj(&daio->rscl);
	}
	/* Rewind both cursors back to their master indices. */
	input->ops->master(input);
	daio->rscl.ops->master(&daio->rscl);

	return 0;
}
196
/* Connect @input as the DAO's right channel source; mirror of
 * dao_set_left_input() but storing entries at imappers[rscl.msr + i].
 * NOTE(review): imap_add() return values are ignored — confirm they
 * cannot fail here. */
static int dao_set_right_input(struct dao *dao, struct rsc *input)
{
	struct imapper *entry;
	struct daio *daio = &dao->daio;
	int i;

	entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL);
	if (NULL == entry)
		return -ENOMEM;

	/* Program master and conjugate resources */
	input->ops->master(input);
	daio->rscr.ops->master(&daio->rscr);
	for (i = 0; i < daio->rscr.msr; i++, entry++) {
		entry->slot = input->ops->output_slot(input);
		entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr);
		dao->mgr->imap_add(dao->mgr, entry);
		dao->imappers[daio->rscl.msr + i] = entry;

		input->ops->next_conj(input);
		daio->rscr.ops->next_conj(&daio->rscr);
	}
	/* Rewind both cursors back to their master indices. */
	input->ops->master(input);
	daio->rscr.ops->master(&daio->rscr);

	return 0;
}
224
/* Detach the DAO's left input: remove every mapper entry from the
 * manager, then free the array (imappers[0] is the kzalloc base pointer
 * from dao_set_left_input). No-op when nothing is connected. */
static int dao_clear_left_input(struct dao *dao)
{
	struct imapper *entry;
	struct daio *daio = &dao->daio;
	int i;

	if (NULL == dao->imappers[0])
		return 0;

	entry = dao->imappers[0];
	dao->mgr->imap_delete(dao->mgr, entry);
	/* Program conjugate resources */
	for (i = 1; i < daio->rscl.msr; i++) {
		entry = dao->imappers[i];
		dao->mgr->imap_delete(dao->mgr, entry);
		dao->imappers[i] = NULL;
	}

	kfree(dao->imappers[0]);
	dao->imappers[0] = NULL;

	return 0;
}
248
/* Detach the DAO's right input; mirror of dao_clear_left_input() for the
 * entries stored at imappers[rscl.msr ..]. No-op when nothing is connected. */
static int dao_clear_right_input(struct dao *dao)
{
	struct imapper *entry;
	struct daio *daio = &dao->daio;
	int i;

	if (NULL == dao->imappers[daio->rscl.msr])
		return 0;

	entry = dao->imappers[daio->rscl.msr];
	dao->mgr->imap_delete(dao->mgr, entry);
	/* Program conjugate resources */
	for (i = 1; i < daio->rscr.msr; i++) {
		entry = dao->imappers[daio->rscl.msr + i];
		dao->mgr->imap_delete(dao->mgr, entry);
		dao->imappers[daio->rscl.msr + i] = NULL;
	}

	kfree(dao->imappers[daio->rscl.msr]);
	dao->imappers[daio->rscl.msr] = NULL;

	return 0;
}
272
/* Operation table shared by all DAO instances (installed in dao_rsc_init). */
static struct dao_rsc_ops dao_ops = {
	.set_spos		= dao_spdif_set_spos,
	.commit_write		= dao_commit_write,
	.get_spos		= dao_spdif_get_spos,
	.reinit			= dao_rsc_reinit,
	.set_left_input		= dao_set_left_input,
	.set_right_input	= dao_set_right_input,
	.clear_left_input	= dao_clear_left_input,
	.clear_right_input	= dao_clear_right_input,
};
283
284static int dai_set_srt_srcl(struct dai *dai, struct rsc *src)
285{
286 src->ops->master(src);
287 ((struct hw *)dai->hw)->dai_srt_set_srcm(dai->ctrl_blk,
288 src->ops->index(src));
289 return 0;
290}
291
292static int dai_set_srt_srcr(struct dai *dai, struct rsc *src)
293{
294 src->ops->master(src);
295 ((struct hw *)dai->hw)->dai_srt_set_srco(dai->ctrl_blk,
296 src->ops->index(src));
297 return 0;
298}
299
300static int dai_set_srt_msr(struct dai *dai, unsigned int msr)
301{
302 unsigned int rsr;
303
304 for (rsr = 0; msr > 1; msr >>= 1)
305 rsr++;
306
307 ((struct hw *)dai->hw)->dai_srt_set_rsr(dai->ctrl_blk, rsr);
308 return 0;
309}
310
311static int dai_set_enb_src(struct dai *dai, unsigned int enb)
312{
313 ((struct hw *)dai->hw)->dai_srt_set_ec(dai->ctrl_blk, enb);
314 return 0;
315}
316
317static int dai_set_enb_srt(struct dai *dai, unsigned int enb)
318{
319 ((struct hw *)dai->hw)->dai_srt_set_et(dai->ctrl_blk, enb);
320 return 0;
321}
322
323static int dai_commit_write(struct dai *dai)
324{
325 ((struct hw *)dai->hw)->dai_commit_write(dai->hw,
326 daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
327 return 0;
328}
329
/* Operation table shared by all DAI instances (installed in dai_rsc_init). */
static struct dai_rsc_ops dai_ops = {
	.set_srt_srcl	= dai_set_srt_srcl,
	.set_srt_srcr	= dai_set_srt_srcr,
	.set_srt_msr	= dai_set_srt_msr,
	.set_enb_src	= dai_set_enb_src,
	.set_enb_srt	= dai_set_enb_srt,
	.commit_write	= dai_commit_write,
};
338
339static int daio_rsc_init(struct daio *daio,
340 const struct daio_desc *desc,
341 void *hw)
342{
343 int err;
344 unsigned int idx_l, idx_r;
345
346 switch (((struct hw *)hw)->chip_type) {
347 case ATC20K1:
348 idx_l = idx_20k1[desc->type].left;
349 idx_r = idx_20k1[desc->type].right;
350 break;
351 case ATC20K2:
352 idx_l = idx_20k2[desc->type].left;
353 idx_r = idx_20k2[desc->type].right;
354 break;
355 default:
356 return -EINVAL;
357 }
358 err = rsc_init(&daio->rscl, idx_l, DAIO, desc->msr, hw);
359 if (err)
360 return err;
361
362 err = rsc_init(&daio->rscr, idx_r, DAIO, desc->msr, hw);
363 if (err)
364 goto error1;
365
366 /* Set daio->rscl/r->ops to daio specific ones */
367 if (desc->type <= DAIO_OUT_MAX) {
368 daio->rscl.ops = daio->rscr.ops = &daio_out_rsc_ops;
369 } else {
370 switch (((struct hw *)hw)->chip_type) {
371 case ATC20K1:
372 daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k1;
373 break;
374 case ATC20K2:
375 daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k2;
376 break;
377 default:
378 break;
379 }
380 }
381 daio->type = desc->type;
382
383 return 0;
384
385error1:
386 rsc_uninit(&daio->rscl);
387 return err;
388}
389
390static int daio_rsc_uninit(struct daio *daio)
391{
392 rsc_uninit(&daio->rscl);
393 rsc_uninit(&daio->rscr);
394
395 return 0;
396}
397
398static int dao_rsc_init(struct dao *dao,
399 const struct daio_desc *desc,
400 struct daio_mgr *mgr)
401{
402 struct hw *hw = mgr->mgr.hw;
403 unsigned int conf;
404 int err;
405
406 err = daio_rsc_init(&dao->daio, desc, mgr->mgr.hw);
407 if (err)
408 return err;
409
410 dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
411 if (NULL == dao->imappers) {
412 err = -ENOMEM;
413 goto error1;
414 }
415 dao->ops = &dao_ops;
416 dao->mgr = mgr;
417 dao->hw = hw;
418 err = hw->dao_get_ctrl_blk(&dao->ctrl_blk);
419 if (err)
420 goto error2;
421
422 hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
423 daio_device_index(dao->daio.type, hw));
424 hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
425
426 conf = (desc->msr & 0x7) | (desc->passthru << 3);
427 hw->daio_mgr_dao_init(mgr->mgr.ctrl_blk,
428 daio_device_index(dao->daio.type, hw), conf);
429 hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
430 daio_device_index(dao->daio.type, hw));
431 hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
432
433 return 0;
434
435error2:
436 kfree(dao->imappers);
437 dao->imappers = NULL;
438error1:
439 daio_rsc_uninit(&dao->daio);
440 return err;
441}
442
443static int dao_rsc_uninit(struct dao *dao)
444{
445 if (NULL != dao->imappers) {
446 if (NULL != dao->imappers[0])
447 dao_clear_left_input(dao);
448
449 if (NULL != dao->imappers[dao->daio.rscl.msr])
450 dao_clear_right_input(dao);
451
452 kfree(dao->imappers);
453 dao->imappers = NULL;
454 }
455 ((struct hw *)dao->hw)->dao_put_ctrl_blk(dao->ctrl_blk);
456 dao->hw = dao->ctrl_blk = NULL;
457 daio_rsc_uninit(&dao->daio);
458
459 return 0;
460}
461
462static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc)
463{
464 struct daio_mgr *mgr = dao->mgr;
465 struct daio_desc dsc = {0};
466
467 dsc.type = dao->daio.type;
468 dsc.msr = desc->msr;
469 dsc.passthru = desc->passthru;
470 dao_rsc_uninit(dao);
471 return dao_rsc_init(dao, &dsc, mgr);
472}
473
474static int dai_rsc_init(struct dai *dai,
475 const struct daio_desc *desc,
476 struct daio_mgr *mgr)
477{
478 int err;
479 struct hw *hw = mgr->mgr.hw;
480 unsigned int rsr, msr;
481
482 err = daio_rsc_init(&dai->daio, desc, mgr->mgr.hw);
483 if (err)
484 return err;
485
486 dai->ops = &dai_ops;
487 dai->hw = mgr->mgr.hw;
488 err = hw->dai_get_ctrl_blk(&dai->ctrl_blk);
489 if (err)
490 goto error1;
491
492 for (rsr = 0, msr = desc->msr; msr > 1; msr >>= 1)
493 rsr++;
494
495 hw->dai_srt_set_rsr(dai->ctrl_blk, rsr);
496 hw->dai_srt_set_drat(dai->ctrl_blk, 0);
497 /* default to disabling control of a SRC */
498 hw->dai_srt_set_ec(dai->ctrl_blk, 0);
499 hw->dai_srt_set_et(dai->ctrl_blk, 0); /* default to disabling SRT */
500 hw->dai_commit_write(hw,
501 daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
502
503 return 0;
504
505error1:
506 daio_rsc_uninit(&dai->daio);
507 return err;
508}
509
510static int dai_rsc_uninit(struct dai *dai)
511{
512 ((struct hw *)dai->hw)->dai_put_ctrl_blk(dai->ctrl_blk);
513 dai->hw = dai->ctrl_blk = NULL;
514 daio_rsc_uninit(&dai->daio);
515 return 0;
516}
517
518static int daio_mgr_get_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
519{
520 if (((union daio_usage *)mgr->rscs)->data & (0x1 << type))
521 return -ENOENT;
522
523 ((union daio_usage *)mgr->rscs)->data |= (0x1 << type);
524
525 return 0;
526}
527
528static int daio_mgr_put_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
529{
530 ((union daio_usage *)mgr->rscs)->data &= ~(0x1 << type);
531
532 return 0;
533}
534
535static int get_daio_rsc(struct daio_mgr *mgr,
536 const struct daio_desc *desc,
537 struct daio **rdaio)
538{
539 int err;
540 struct dai *dai = NULL;
541 struct dao *dao = NULL;
542 unsigned long flags;
543
544 *rdaio = NULL;
545
546 /* Check whether there are sufficient daio resources to meet request. */
547 spin_lock_irqsave(&mgr->mgr_lock, flags);
548 err = daio_mgr_get_rsc(&mgr->mgr, desc->type);
549 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
550 if (err) {
551 printk(KERN_ERR "Can't meet DAIO resource request!\n");
552 return err;
553 }
554
555 /* Allocate mem for daio resource */
556 if (desc->type <= DAIO_OUT_MAX) {
557 dao = kzalloc(sizeof(*dao), GFP_KERNEL);
558 if (NULL == dao) {
559 err = -ENOMEM;
560 goto error;
561 }
562 err = dao_rsc_init(dao, desc, mgr);
563 if (err)
564 goto error;
565
566 *rdaio = &dao->daio;
567 } else {
568 dai = kzalloc(sizeof(*dai), GFP_KERNEL);
569 if (NULL == dai) {
570 err = -ENOMEM;
571 goto error;
572 }
573 err = dai_rsc_init(dai, desc, mgr);
574 if (err)
575 goto error;
576
577 *rdaio = &dai->daio;
578 }
579
580 mgr->daio_enable(mgr, *rdaio);
581 mgr->commit_write(mgr);
582
583 return 0;
584
585error:
586 if (NULL != dao)
587 kfree(dao);
588 else if (NULL != dai)
589 kfree(dai);
590
591 spin_lock_irqsave(&mgr->mgr_lock, flags);
592 daio_mgr_put_rsc(&mgr->mgr, desc->type);
593 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
594 return err;
595}
596
597static int put_daio_rsc(struct daio_mgr *mgr, struct daio *daio)
598{
599 unsigned long flags;
600
601 mgr->daio_disable(mgr, daio);
602 mgr->commit_write(mgr);
603
604 spin_lock_irqsave(&mgr->mgr_lock, flags);
605 daio_mgr_put_rsc(&mgr->mgr, daio->type);
606 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
607
608 if (daio->type <= DAIO_OUT_MAX) {
609 dao_rsc_uninit(container_of(daio, struct dao, daio));
610 kfree(container_of(daio, struct dao, daio));
611 } else {
612 dai_rsc_uninit(container_of(daio, struct dai, daio));
613 kfree(container_of(daio, struct dai, daio));
614 }
615
616 return 0;
617}
618
619static int daio_mgr_enb_daio(struct daio_mgr *mgr, struct daio *daio)
620{
621 struct hw *hw = mgr->mgr.hw;
622
623 if (DAIO_OUT_MAX >= daio->type) {
624 hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
625 daio_device_index(daio->type, hw));
626 } else {
627 hw->daio_mgr_enb_dai(mgr->mgr.ctrl_blk,
628 daio_device_index(daio->type, hw));
629 }
630 return 0;
631}
632
633static int daio_mgr_dsb_daio(struct daio_mgr *mgr, struct daio *daio)
634{
635 struct hw *hw = mgr->mgr.hw;
636
637 if (DAIO_OUT_MAX >= daio->type) {
638 hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
639 daio_device_index(daio->type, hw));
640 } else {
641 hw->daio_mgr_dsb_dai(mgr->mgr.ctrl_blk,
642 daio_device_index(daio->type, hw));
643 }
644 return 0;
645}
646
647static int daio_map_op(void *data, struct imapper *entry)
648{
649 struct rsc_mgr *mgr = &((struct daio_mgr *)data)->mgr;
650 struct hw *hw = mgr->hw;
651
652 hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
653 hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
654 hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
655 hw->daio_mgr_commit_write(mgr->hw, mgr->ctrl_blk);
656
657 return 0;
658}
659
660static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry)
661{
662 unsigned long flags;
663 int err;
664
665 spin_lock_irqsave(&mgr->imap_lock, flags);
666 if ((0 == entry->addr) && (mgr->init_imap_added)) {
667 input_mapper_delete(&mgr->imappers, mgr->init_imap,
668 daio_map_op, mgr);
669 mgr->init_imap_added = 0;
670 }
671 err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr);
672 spin_unlock_irqrestore(&mgr->imap_lock, flags);
673
674 return err;
675}
676
677static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry)
678{
679 unsigned long flags;
680 int err;
681
682 spin_lock_irqsave(&mgr->imap_lock, flags);
683 err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr);
684 if (list_empty(&mgr->imappers)) {
685 input_mapper_add(&mgr->imappers, mgr->init_imap,
686 daio_map_op, mgr);
687 mgr->init_imap_added = 1;
688 }
689 spin_unlock_irqrestore(&mgr->imap_lock, flags);
690
691 return err;
692}
693
/* Commit the manager's cached control block to the hardware. */
static int daio_mgr_commit_write(struct daio_mgr *mgr)
{
	struct hw *hw = mgr->mgr.hw;

	hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
	return 0;
}
701
702int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
703{
704 int err, i;
705 struct daio_mgr *daio_mgr;
706 struct imapper *entry;
707
708 *rdaio_mgr = NULL;
709 daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL);
710 if (NULL == daio_mgr)
711 return -ENOMEM;
712
713 err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw);
714 if (err)
715 goto error1;
716
717 spin_lock_init(&daio_mgr->mgr_lock);
718 spin_lock_init(&daio_mgr->imap_lock);
719 INIT_LIST_HEAD(&daio_mgr->imappers);
720 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
721 if (NULL == entry) {
722 err = -ENOMEM;
723 goto error2;
724 }
725 entry->slot = entry->addr = entry->next = entry->user = 0;
726 list_add(&entry->list, &daio_mgr->imappers);
727 daio_mgr->init_imap = entry;
728 daio_mgr->init_imap_added = 1;
729
730 daio_mgr->get_daio = get_daio_rsc;
731 daio_mgr->put_daio = put_daio_rsc;
732 daio_mgr->daio_enable = daio_mgr_enb_daio;
733 daio_mgr->daio_disable = daio_mgr_dsb_daio;
734 daio_mgr->imap_add = daio_imap_add;
735 daio_mgr->imap_delete = daio_imap_delete;
736 daio_mgr->commit_write = daio_mgr_commit_write;
737
738 for (i = 0; i < 8; i++) {
739 ((struct hw *)hw)->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i);
740 ((struct hw *)hw)->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i);
741 }
742 ((struct hw *)hw)->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk);
743
744 *rdaio_mgr = daio_mgr;
745
746 return 0;
747
748error2:
749 rsc_mgr_uninit(&daio_mgr->mgr);
750error1:
751 kfree(daio_mgr);
752 return err;
753}
754
/* Destroy a DAIO resource manager created by daio_mgr_create(). */
int daio_mgr_destroy(struct daio_mgr *daio_mgr)
{
	unsigned long flags;

	/* free daio input mapper list */
	spin_lock_irqsave(&daio_mgr->imap_lock, flags);
	free_input_mapper_list(&daio_mgr->imappers);
	spin_unlock_irqrestore(&daio_mgr->imap_lock, flags);

	rsc_mgr_uninit(&daio_mgr->mgr);
	kfree(daio_mgr);

	return 0;
}
769
diff --git a/sound/pci/ctxfi/ctdaio.h b/sound/pci/ctxfi/ctdaio.h
new file mode 100644
index 000000000000..0f52ce571ee8
--- /dev/null
+++ b/sound/pci/ctxfi/ctdaio.h
@@ -0,0 +1,122 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctdaio.h
9 *
10 * @Brief
11 * This file contains the definition of Digital Audio Input Output
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 23 2008
16 *
17 */
18
19#ifndef CTDAIO_H
20#define CTDAIO_H
21
22#include "ctresource.h"
23#include "ctimap.h"
24#include <linux/spinlock.h>
25#include <linux/list.h>
26
/* Define the descriptor of a daio resource.
 * Each type also serves as its bit position in the daio usage bitmap. */
enum DAIOTYP {
	LINEO1,
	LINEO2,
	LINEO3,
	LINEO4,
	SPDIFOO,	/* S/PDIF Out (Flexijack/Optical) */
	LINEIM,
	SPDIFIO,	/* S/PDIF In (Flexijack/Optical) on the card */
	SPDIFI1,	/* S/PDIF In on internal Drive Bay */
	NUM_DAIOTYP
};
39
40struct dao_rsc_ops;
41struct dai_rsc_ops;
42struct daio_mgr;
43
/* Common base object of DAO and DAI resources: one rsc per channel. */
struct daio {
	struct rsc rscl;	/* Basic resource info for left TX/RX */
	struct rsc rscr;	/* Basic resource info for right TX/RX */
	enum DAIOTYP type;	/* which DAIO device this is */
};
49
/* Digital Audio Output resource. */
struct dao {
	struct daio daio;	/* common DAIO state */
	struct dao_rsc_ops *ops; /* DAO specific operations */
	/* 2 * msr mapper pointers: left-channel entries first, then right */
	struct imapper **imappers;
	struct daio_mgr *mgr;	/* owning manager */
	void *hw;		/* the card's struct hw object */
	void *ctrl_blk;		/* hardware control block */
};
58
/* Digital Audio Input resource. */
struct dai {
	struct daio daio;	/* common DAIO state */
	struct dai_rsc_ops *ops; /* DAI specific operations */
	void *hw;		/* the card's struct hw object */
	void *ctrl_blk;		/* hardware control block */
};
65
/* Parameters for reinitializing an existing DAO (dao_rsc_ops.reinit). */
struct dao_desc {
	unsigned int msr:4;	/* master sample rate multiple */
	unsigned int passthru:1; /* passthrough mode flag */
};
70
/* Operations available on a DAO resource. */
struct dao_rsc_ops {
	int (*set_spos)(struct dao *dao, unsigned int spos);
	int (*commit_write)(struct dao *dao);
	int (*get_spos)(struct dao *dao, unsigned int *spos);
	/* tear down and rebuild with new msr/passthru settings */
	int (*reinit)(struct dao *dao, const struct dao_desc *desc);
	/* route/unroute an input resource to the left/right channel */
	int (*set_left_input)(struct dao *dao, struct rsc *input);
	int (*set_right_input)(struct dao *dao, struct rsc *input);
	int (*clear_left_input)(struct dao *dao);
	int (*clear_right_input)(struct dao *dao);
};
81
/* Operations available on a DAI resource (sample-rate tracker setup). */
struct dai_rsc_ops {
	int (*set_srt_srcl)(struct dai *dai, struct rsc *src);
	int (*set_srt_srcr)(struct dai *dai, struct rsc *src);
	int (*set_srt_msr)(struct dai *dai, unsigned int msr);
	int (*set_enb_src)(struct dai *dai, unsigned int enb);
	int (*set_enb_srt)(struct dai *dai, unsigned int enb);
	int (*commit_write)(struct dai *dai);
};
90
/* Define daio resource request description info */
struct daio_desc {
	unsigned int type:4;	/* one of enum DAIOTYP */
	unsigned int msr:4;	/* master sample rate multiple */
	unsigned int passthru:1; /* passthrough mode flag */
};
97
/* Manager of all DAIO resources of a card. */
struct daio_mgr {
	struct rsc_mgr mgr;	/* Basic resource manager info */
	spinlock_t mgr_lock;	/* protects the usage bitmap in mgr.rscs */
	spinlock_t imap_lock;	/* protects the input-mapper list */
	struct list_head imappers;
	/* placeholder entry kept in the list while it has no real entries */
	struct imapper *init_imap;
	unsigned int init_imap_added;

	/* request one daio resource */
	int (*get_daio)(struct daio_mgr *mgr,
			const struct daio_desc *desc, struct daio **rdaio);
	/* return one daio resource */
	int (*put_daio)(struct daio_mgr *mgr, struct daio *daio);
	int (*daio_enable)(struct daio_mgr *mgr, struct daio *daio);
	int (*daio_disable)(struct daio_mgr *mgr, struct daio *daio);
	int (*imap_add)(struct daio_mgr *mgr, struct imapper *entry);
	int (*imap_delete)(struct daio_mgr *mgr, struct imapper *entry);
	int (*commit_write)(struct daio_mgr *mgr);
};
117
118/* Constructor and destructor of daio resource manager */
119int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr);
120int daio_mgr_destroy(struct daio_mgr *daio_mgr);
121
122#endif /* CTDAIO_H */
diff --git a/sound/pci/ctxfi/cthardware.c b/sound/pci/ctxfi/cthardware.c
new file mode 100644
index 000000000000..8e64f4862e85
--- /dev/null
+++ b/sound/pci/ctxfi/cthardware.c
@@ -0,0 +1,91 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthardware.c
9 *
10 * @Brief
 * This file contains the implementation of the hardware access method.
12 *
13 * @Author Liu Chun
14 * @Date Jun 26 2008
15 *
16 */
17
18#include "cthardware.h"
19#include "cthw20k1.h"
20#include "cthw20k2.h"
21#include <linux/bug.h>
22
23int __devinit create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type,
24 enum CTCARDS model, struct hw **rhw)
25{
26 int err;
27
28 switch (chip_type) {
29 case ATC20K1:
30 err = create_20k1_hw_obj(rhw);
31 break;
32 case ATC20K2:
33 err = create_20k2_hw_obj(rhw);
34 break;
35 default:
36 err = -ENODEV;
37 break;
38 }
39 if (err)
40 return err;
41
42 (*rhw)->pci = pci;
43 (*rhw)->chip_type = chip_type;
44 (*rhw)->model = model;
45
46 return 0;
47}
48
49int destroy_hw_obj(struct hw *hw)
50{
51 int err;
52
53 switch (hw->pci->device) {
54 case 0x0005: /* 20k1 device */
55 err = destroy_20k1_hw_obj(hw);
56 break;
57 case 0x000B: /* 20k2 device */
58 err = destroy_20k2_hw_obj(hw);
59 break;
60 default:
61 err = -ENODEV;
62 break;
63 }
64
65 return err;
66}
67
/* Extract the bits selected by @field from @data, shifted down so the
 * lowest bit of the field lands in bit 0.  @field must be non-zero. */
unsigned int get_field(unsigned int data, unsigned int field)
{
	int shift = 0;

	BUG_ON(!field);
	/* Find the position of the field's least significant set bit. */
	while (!(field & (1 << shift)))
		shift++;

	return (data & field) >> shift;
}
79
/* Store @value into the bits of *@data selected by @field, leaving all
 * other bits untouched.  @field must be non-zero. */
void set_field(unsigned int *data, unsigned int field, unsigned int value)
{
	int shift = 0;

	BUG_ON(!field);
	/* Find the position of the field's least significant set bit. */
	while (!(field & (1 << shift)))
		shift++;

	*data = (*data & (~field)) | ((value << shift) & field);
}
91
diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
new file mode 100644
index 000000000000..4a8e04f090a4
--- /dev/null
+++ b/sound/pci/ctxfi/cthardware.h
@@ -0,0 +1,196 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthardware.h
9 *
10 * @Brief
 * This file contains the definition of the hardware access method.
12 *
13 * @Author Liu Chun
14 * @Date May 13 2008
15 *
16 */
17
18#ifndef CTHARDWARE_H
19#define CTHARDWARE_H
20
21#include <linux/types.h>
22#include <linux/pci.h>
23
24enum CHIPTYP {
25 ATC20K1,
26 ATC20K2,
27 ATCNONE
28};
29
30enum CTCARDS {
31 /* 20k1 models */
32 CTSB055X,
33 CTSB073X,
34 CTUAA,
35 CT20K1_UNKNOWN,
36 /* 20k2 models */
37 CTSB0760,
38 CTHENDRIX,
39 CTSB0880,
40 NUM_CTCARDS /* This should always be the last */
41};
42
43/* Type of input source for ADC */
44enum ADCSRC{
45 ADC_MICIN,
46 ADC_LINEIN,
47 ADC_VIDEO,
48 ADC_AUX,
49 ADC_NONE /* Switch to digital input */
50};
51
52struct card_conf {
53 /* device virtual mem page table page physical addr
54 * (supporting one page table page now) */
55 unsigned long vm_pgt_phys;
56 unsigned int rsr; /* reference sample rate in Hzs*/
57 unsigned int msr; /* master sample rate in rsrs */
58};
59
60struct hw {
61 int (*card_init)(struct hw *hw, struct card_conf *info);
62 int (*card_stop)(struct hw *hw);
63 int (*pll_init)(struct hw *hw, unsigned int rsr);
64 int (*is_adc_source_selected)(struct hw *hw, enum ADCSRC source);
65 int (*select_adc_source)(struct hw *hw, enum ADCSRC source);
66 int (*have_digit_io_switch)(struct hw *hw);
67
68 /* SRC operations */
69 int (*src_rsc_get_ctrl_blk)(void **rblk);
70 int (*src_rsc_put_ctrl_blk)(void *blk);
71 int (*src_set_state)(void *blk, unsigned int state);
72 int (*src_set_bm)(void *blk, unsigned int bm);
73 int (*src_set_rsr)(void *blk, unsigned int rsr);
74 int (*src_set_sf)(void *blk, unsigned int sf);
75 int (*src_set_wr)(void *blk, unsigned int wr);
76 int (*src_set_pm)(void *blk, unsigned int pm);
77 int (*src_set_rom)(void *blk, unsigned int rom);
78 int (*src_set_vo)(void *blk, unsigned int vo);
79 int (*src_set_st)(void *blk, unsigned int st);
80 int (*src_set_ie)(void *blk, unsigned int ie);
81 int (*src_set_ilsz)(void *blk, unsigned int ilsz);
82 int (*src_set_bp)(void *blk, unsigned int bp);
83 int (*src_set_cisz)(void *blk, unsigned int cisz);
84 int (*src_set_ca)(void *blk, unsigned int ca);
85 int (*src_set_sa)(void *blk, unsigned int sa);
86 int (*src_set_la)(void *blk, unsigned int la);
87 int (*src_set_pitch)(void *blk, unsigned int pitch);
88 int (*src_set_clear_zbufs)(void *blk, unsigned int clear);
89 int (*src_set_dirty)(void *blk, unsigned int flags);
90 int (*src_set_dirty_all)(void *blk);
91 int (*src_commit_write)(struct hw *hw, unsigned int idx, void *blk);
92 int (*src_get_ca)(struct hw *hw, unsigned int idx, void *blk);
93 unsigned int (*src_get_dirty)(void *blk);
94 unsigned int (*src_dirty_conj_mask)(void);
95 int (*src_mgr_get_ctrl_blk)(void **rblk);
96 int (*src_mgr_put_ctrl_blk)(void *blk);
	/* synchronously enable src @idx */
98 int (*src_mgr_enbs_src)(void *blk, unsigned int idx);
99 /* enable src @idx */
100 int (*src_mgr_enb_src)(void *blk, unsigned int idx);
101 /* disable src @idx */
102 int (*src_mgr_dsb_src)(void *blk, unsigned int idx);
103 int (*src_mgr_commit_write)(struct hw *hw, void *blk);
104
105 /* SRC Input Mapper operations */
106 int (*srcimp_mgr_get_ctrl_blk)(void **rblk);
107 int (*srcimp_mgr_put_ctrl_blk)(void *blk);
108 int (*srcimp_mgr_set_imaparc)(void *blk, unsigned int slot);
109 int (*srcimp_mgr_set_imapuser)(void *blk, unsigned int user);
110 int (*srcimp_mgr_set_imapnxt)(void *blk, unsigned int next);
111 int (*srcimp_mgr_set_imapaddr)(void *blk, unsigned int addr);
112 int (*srcimp_mgr_commit_write)(struct hw *hw, void *blk);
113
114 /* AMIXER operations */
115 int (*amixer_rsc_get_ctrl_blk)(void **rblk);
116 int (*amixer_rsc_put_ctrl_blk)(void *blk);
117 int (*amixer_mgr_get_ctrl_blk)(void **rblk);
118 int (*amixer_mgr_put_ctrl_blk)(void *blk);
119 int (*amixer_set_mode)(void *blk, unsigned int mode);
120 int (*amixer_set_iv)(void *blk, unsigned int iv);
121 int (*amixer_set_x)(void *blk, unsigned int x);
122 int (*amixer_set_y)(void *blk, unsigned int y);
123 int (*amixer_set_sadr)(void *blk, unsigned int sadr);
124 int (*amixer_set_se)(void *blk, unsigned int se);
125 int (*amixer_set_dirty)(void *blk, unsigned int flags);
126 int (*amixer_set_dirty_all)(void *blk);
127 int (*amixer_commit_write)(struct hw *hw, unsigned int idx, void *blk);
128 int (*amixer_get_y)(void *blk);
129 unsigned int (*amixer_get_dirty)(void *blk);
130
131 /* DAIO operations */
132 int (*dai_get_ctrl_blk)(void **rblk);
133 int (*dai_put_ctrl_blk)(void *blk);
134 int (*dai_srt_set_srco)(void *blk, unsigned int src);
135 int (*dai_srt_set_srcm)(void *blk, unsigned int src);
136 int (*dai_srt_set_rsr)(void *blk, unsigned int rsr);
137 int (*dai_srt_set_drat)(void *blk, unsigned int drat);
138 int (*dai_srt_set_ec)(void *blk, unsigned int ec);
139 int (*dai_srt_set_et)(void *blk, unsigned int et);
140 int (*dai_commit_write)(struct hw *hw, unsigned int idx, void *blk);
141 int (*dao_get_ctrl_blk)(void **rblk);
142 int (*dao_put_ctrl_blk)(void *blk);
143 int (*dao_set_spos)(void *blk, unsigned int spos);
144 int (*dao_commit_write)(struct hw *hw, unsigned int idx, void *blk);
145 int (*dao_get_spos)(void *blk, unsigned int *spos);
146
147 int (*daio_mgr_get_ctrl_blk)(struct hw *hw, void **rblk);
148 int (*daio_mgr_put_ctrl_blk)(void *blk);
149 int (*daio_mgr_enb_dai)(void *blk, unsigned int idx);
150 int (*daio_mgr_dsb_dai)(void *blk, unsigned int idx);
151 int (*daio_mgr_enb_dao)(void *blk, unsigned int idx);
152 int (*daio_mgr_dsb_dao)(void *blk, unsigned int idx);
153 int (*daio_mgr_dao_init)(void *blk, unsigned int idx,
154 unsigned int conf);
155 int (*daio_mgr_set_imaparc)(void *blk, unsigned int slot);
156 int (*daio_mgr_set_imapnxt)(void *blk, unsigned int next);
157 int (*daio_mgr_set_imapaddr)(void *blk, unsigned int addr);
158 int (*daio_mgr_commit_write)(struct hw *hw, void *blk);
159
160 int (*set_timer_irq)(struct hw *hw, int enable);
161 int (*set_timer_tick)(struct hw *hw, unsigned int tick);
162 unsigned int (*get_wc)(struct hw *hw);
163
164 void (*irq_callback)(void *data, unsigned int bit);
165 void *irq_callback_data;
166
167 struct pci_dev *pci; /* the pci kernel structure of this card */
168 int irq;
169 unsigned long io_base;
170 unsigned long mem_base;
171
172 enum CHIPTYP chip_type;
173 enum CTCARDS model;
174};
175
176int create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type,
177 enum CTCARDS model, struct hw **rhw);
178int destroy_hw_obj(struct hw *hw);
179
180unsigned int get_field(unsigned int data, unsigned int field);
181void set_field(unsigned int *data, unsigned int field, unsigned int value);
182
183/* IRQ bits */
184#define PLL_INT (1 << 10) /* PLL input-clock out-of-range */
185#define FI_INT (1 << 9) /* forced interrupt */
186#define IT_INT (1 << 8) /* timer interrupt */
187#define PCI_INT (1 << 7) /* PCI bus error pending */
188#define URT_INT (1 << 6) /* UART Tx/Rx */
189#define GPI_INT (1 << 5) /* GPI pin */
190#define MIX_INT (1 << 4) /* mixer parameter segment FIFO channels */
191#define DAI_INT (1 << 3) /* DAI (SR-tracker or SPDIF-receiver) */
192#define TP_INT (1 << 2) /* transport priority queue */
193#define DSP_INT (1 << 1) /* DSP */
194#define SRC_INT (1 << 0) /* SRC channels */
195
196#endif /* CTHARDWARE_H */
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
new file mode 100644
index 000000000000..cb69d9ddfbe3
--- /dev/null
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -0,0 +1,2248 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthw20k1.c
9 *
10 * @Brief
 * This file contains the implementation of the hardware access method for 20k1.
12 *
13 * @Author Liu Chun
14 * @Date Jun 24 2008
15 *
16 */
17
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/pci.h>
21#include <linux/io.h>
22#include <linux/string.h>
23#include <linux/spinlock.h>
24#include <linux/kernel.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include "cthw20k1.h"
28#include "ct20k1reg.h"
29
30#if BITS_PER_LONG == 32
31#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
32#else
33#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
34#endif
35
/* 20k1-specific hardware object wrapping the generic struct hw. */
struct hw20k1 {
	struct hw hw;
	/* NOTE(review): presumably serialize 20k1 register and PCI config
	 * accesses respectively — confirm against the register helpers. */
	spinlock_t reg_20k1_lock;
	spinlock_t reg_pci_lock;
};
41
42static u32 hw_read_20kx(struct hw *hw, u32 reg);
43static void hw_write_20kx(struct hw *hw, u32 reg, u32 data);
44static u32 hw_read_pci(struct hw *hw, u32 reg);
45static void hw_write_pci(struct hw *hw, u32 reg, u32 data);
46
47/*
48 * Type definition block.
49 * The layout of control structures can be directly applied on 20k2 chip.
50 */
51
52/*
53 * SRC control block definitions.
54 */
55
56/* SRC resource control block */
57#define SRCCTL_STATE 0x00000007
58#define SRCCTL_BM 0x00000008
59#define SRCCTL_RSR 0x00000030
60#define SRCCTL_SF 0x000001C0
61#define SRCCTL_WR 0x00000200
62#define SRCCTL_PM 0x00000400
63#define SRCCTL_ROM 0x00001800
64#define SRCCTL_VO 0x00002000
65#define SRCCTL_ST 0x00004000
66#define SRCCTL_IE 0x00008000
67#define SRCCTL_ILSZ 0x000F0000
68#define SRCCTL_BP 0x00100000
69
70#define SRCCCR_CISZ 0x000007FF
71#define SRCCCR_CWA 0x001FF800
72#define SRCCCR_D 0x00200000
73#define SRCCCR_RS 0x01C00000
74#define SRCCCR_NAL 0x3E000000
75#define SRCCCR_RA 0xC0000000
76
77#define SRCCA_CA 0x03FFFFFF
78#define SRCCA_RS 0x1C000000
79#define SRCCA_NAL 0xE0000000
80
81#define SRCSA_SA 0x03FFFFFF
82
83#define SRCLA_LA 0x03FFFFFF
84
/* Mixer Parameter Ring ram Low and High registers.
 * Fixed-point value in 8.24 format for the parameter channel. */
87#define MPRLH_PITCH 0xFFFFFFFF
88
/* SRC resource register dirty flags: one bit per cached register image. */
union src_dirty {
	struct {
		u16 ctl:1;
		u16 ccr:1;
		u16 sa:1;
		u16 la:1;
		u16 ca:1;
		u16 mpr:1;
		u16 czbfs:1;	/* Clear Z-Buffers */
		u16 rsv:9;	/* reserved/unused */
	} bf;
	u16 data;	/* all flags as one word */
};
103
/* Cached register images of one SRC resource plus their dirty flags. */
struct src_rsc_ctrl_blk {
	unsigned int ctl;	/* control register image */
	unsigned int ccr;
	unsigned int ca;
	unsigned int sa;
	unsigned int la;
	unsigned int mpr;
	union src_dirty dirty;	/* which images need flushing */
};
113
/* SRC manager control block */
/* Dirty flags for the SRC manager's enable registers. */
union src_mgr_dirty {
	struct {
		u16 enb0:1;
		u16 enb1:1;
		u16 enb2:1;
		u16 enb3:1;
		u16 enb4:1;
		u16 enb5:1;
		u16 enb6:1;
		u16 enb7:1;
		u16 enbsa:1;
		u16 rsv:7;	/* reserved/unused */
	} bf;
	u16 data;	/* all flags as one word */
};
130
/* Cached SRC manager enable registers plus their dirty flags. */
struct src_mgr_ctrl_blk {
	unsigned int enbsa;
	unsigned int enb[8];
	union src_mgr_dirty dirty;
};
136
137/* SRCIMP manager control block */
138#define SRCAIM_ARC 0x00000FFF
139#define SRCAIM_NXT 0x00FF0000
140#define SRCAIM_SRC 0xFF000000
141
/* One SRC input-mapper register image and its index. */
struct srcimap {
	unsigned int srcaim;	/* packed ARC/NXT/SRC fields (SRCAIM_*) */
	unsigned int idx;
};
146
/* SRCIMP manager register dirty flags */
union srcimp_mgr_dirty {
	struct {
		u16 srcimap:1;
		u16 rsv:15;	/* reserved/unused */
	} bf;
	u16 data;	/* all flags as one word */
};
155
/* Cached SRCIMP manager state plus its dirty flags. */
struct srcimp_mgr_ctrl_blk {
	struct srcimap srcimap;
	union srcimp_mgr_dirty dirty;
};
160
161/*
162 * Function implementation block.
163 */
164
165static int src_get_rsc_ctrl_blk(void **rblk)
166{
167 struct src_rsc_ctrl_blk *blk;
168
169 *rblk = NULL;
170 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
171 if (NULL == blk)
172 return -ENOMEM;
173
174 *rblk = blk;
175
176 return 0;
177}
178
/* Free an SRC resource control block allocated by src_get_rsc_ctrl_blk().
 * kfree() takes a void pointer, so the cast in the original was
 * unnecessary. */
static int src_put_rsc_ctrl_blk(void *blk)
{
	kfree(blk);

	return 0;
}
185
186static int src_set_state(void *blk, unsigned int state)
187{
188 struct src_rsc_ctrl_blk *ctl = blk;
189
190 set_field(&ctl->ctl, SRCCTL_STATE, state);
191 ctl->dirty.bf.ctl = 1;
192 return 0;
193}
194
195static int src_set_bm(void *blk, unsigned int bm)
196{
197 struct src_rsc_ctrl_blk *ctl = blk;
198
199 set_field(&ctl->ctl, SRCCTL_BM, bm);
200 ctl->dirty.bf.ctl = 1;
201 return 0;
202}
203
204static int src_set_rsr(void *blk, unsigned int rsr)
205{
206 struct src_rsc_ctrl_blk *ctl = blk;
207
208 set_field(&ctl->ctl, SRCCTL_RSR, rsr);
209 ctl->dirty.bf.ctl = 1;
210 return 0;
211}
212
213static int src_set_sf(void *blk, unsigned int sf)
214{
215 struct src_rsc_ctrl_blk *ctl = blk;
216
217 set_field(&ctl->ctl, SRCCTL_SF, sf);
218 ctl->dirty.bf.ctl = 1;
219 return 0;
220}
221
222static int src_set_wr(void *blk, unsigned int wr)
223{
224 struct src_rsc_ctrl_blk *ctl = blk;
225
226 set_field(&ctl->ctl, SRCCTL_WR, wr);
227 ctl->dirty.bf.ctl = 1;
228 return 0;
229}
230
231static int src_set_pm(void *blk, unsigned int pm)
232{
233 struct src_rsc_ctrl_blk *ctl = blk;
234
235 set_field(&ctl->ctl, SRCCTL_PM, pm);
236 ctl->dirty.bf.ctl = 1;
237 return 0;
238}
239
240static int src_set_rom(void *blk, unsigned int rom)
241{
242 struct src_rsc_ctrl_blk *ctl = blk;
243
244 set_field(&ctl->ctl, SRCCTL_ROM, rom);
245 ctl->dirty.bf.ctl = 1;
246 return 0;
247}
248
249static int src_set_vo(void *blk, unsigned int vo)
250{
251 struct src_rsc_ctrl_blk *ctl = blk;
252
253 set_field(&ctl->ctl, SRCCTL_VO, vo);
254 ctl->dirty.bf.ctl = 1;
255 return 0;
256}
257
258static int src_set_st(void *blk, unsigned int st)
259{
260 struct src_rsc_ctrl_blk *ctl = blk;
261
262 set_field(&ctl->ctl, SRCCTL_ST, st);
263 ctl->dirty.bf.ctl = 1;
264 return 0;
265}
266
267static int src_set_ie(void *blk, unsigned int ie)
268{
269 struct src_rsc_ctrl_blk *ctl = blk;
270
271 set_field(&ctl->ctl, SRCCTL_IE, ie);
272 ctl->dirty.bf.ctl = 1;
273 return 0;
274}
275
276static int src_set_ilsz(void *blk, unsigned int ilsz)
277{
278 struct src_rsc_ctrl_blk *ctl = blk;
279
280 set_field(&ctl->ctl, SRCCTL_ILSZ, ilsz);
281 ctl->dirty.bf.ctl = 1;
282 return 0;
283}
284
285static int src_set_bp(void *blk, unsigned int bp)
286{
287 struct src_rsc_ctrl_blk *ctl = blk;
288
289 set_field(&ctl->ctl, SRCCTL_BP, bp);
290 ctl->dirty.bf.ctl = 1;
291 return 0;
292}
293
294static int src_set_cisz(void *blk, unsigned int cisz)
295{
296 struct src_rsc_ctrl_blk *ctl = blk;
297
298 set_field(&ctl->ccr, SRCCCR_CISZ, cisz);
299 ctl->dirty.bf.ccr = 1;
300 return 0;
301}
302
303static int src_set_ca(void *blk, unsigned int ca)
304{
305 struct src_rsc_ctrl_blk *ctl = blk;
306
307 set_field(&ctl->ca, SRCCA_CA, ca);
308 ctl->dirty.bf.ca = 1;
309 return 0;
310}
311
312static int src_set_sa(void *blk, unsigned int sa)
313{
314 struct src_rsc_ctrl_blk *ctl = blk;
315
316 set_field(&ctl->sa, SRCSA_SA, sa);
317 ctl->dirty.bf.sa = 1;
318 return 0;
319}
320
321static int src_set_la(void *blk, unsigned int la)
322{
323 struct src_rsc_ctrl_blk *ctl = blk;
324
325 set_field(&ctl->la, SRCLA_LA, la);
326 ctl->dirty.bf.la = 1;
327 return 0;
328}
329
330static int src_set_pitch(void *blk, unsigned int pitch)
331{
332 struct src_rsc_ctrl_blk *ctl = blk;
333
334 set_field(&ctl->mpr, MPRLH_PITCH, pitch);
335 ctl->dirty.bf.mpr = 1;
336 return 0;
337}
338
339static int src_set_clear_zbufs(void *blk, unsigned int clear)
340{
341 ((struct src_rsc_ctrl_blk *)blk)->dirty.bf.czbfs = (clear ? 1 : 0);
342 return 0;
343}
344
345static int src_set_dirty(void *blk, unsigned int flags)
346{
347 ((struct src_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
348 return 0;
349}
350
351static int src_set_dirty_all(void *blk)
352{
353 ((struct src_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
354 return 0;
355}
356
#define AR_SLOT_SIZE		4096
#define AR_SLOT_BLOCK_SIZE	16
#define AR_PTS_PITCH		6
#define AR_PARAM_SRC_OFFSET	0x60

/*
 * Map a SRC index to the parameter-ring slot holding its pitch value.
 * Each SRC owns a block of AR_SLOT_BLOCK_SIZE entries; the pitch lives
 * at offset AR_PTS_PITCH, rebased by the ring's SRC parameter offset
 * and wrapped to the slot ring size.
 */
static unsigned int src_param_pitch_mixer(unsigned int src_idx)
{
	unsigned int slot = src_idx * AR_SLOT_BLOCK_SIZE + AR_PTS_PITCH;

	slot += AR_SLOT_SIZE - AR_PARAM_SRC_OFFSET;
	return slot % AR_SLOT_SIZE;
}
368
/*
 * Flush all dirty fields of a SRC resource control block to the
 * registers of SRC instance 'idx' (register stride 0x100 per SRC).
 * Each dirty flag is cleared once its register has been written.
 * Always returns 0.
 */
static int src_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct src_rsc_ctrl_blk *ctl = blk;
	int i;

	if (ctl->dirty.bf.czbfs) {
		/* Clear Z-Buffer registers (8 up-sample, 4+8 down-sample) */
		for (i = 0; i < 8; i++)
			hw_write_20kx(hw, SRCUPZ+idx*0x100+i*0x4, 0);

		for (i = 0; i < 4; i++)
			hw_write_20kx(hw, SRCDN0Z+idx*0x100+i*0x4, 0);

		for (i = 0; i < 8; i++)
			hw_write_20kx(hw, SRCDN1Z+idx*0x100+i*0x4, 0);

		ctl->dirty.bf.czbfs = 0;
	}
	if (ctl->dirty.bf.mpr) {
		/* Take the parameter mixer resource in the same group as that
		 * the idx src is in for simplicity. Unlike src, all conjugate
		 * parameter mixer resources must be programmed for
		 * corresponding conjugate src resources. */
		unsigned int pm_idx = src_param_pitch_mixer(idx);
		hw_write_20kx(hw, PRING_LO_HI+4*pm_idx, ctl->mpr);
		hw_write_20kx(hw, PMOPLO+8*pm_idx, 0x3);
		hw_write_20kx(hw, PMOPHI+8*pm_idx, 0x0);
		ctl->dirty.bf.mpr = 0;
	}
	if (ctl->dirty.bf.sa) {
		hw_write_20kx(hw, SRCSA+idx*0x100, ctl->sa);
		ctl->dirty.bf.sa = 0;
	}
	if (ctl->dirty.bf.la) {
		hw_write_20kx(hw, SRCLA+idx*0x100, ctl->la);
		ctl->dirty.bf.la = 0;
	}
	if (ctl->dirty.bf.ca) {
		hw_write_20kx(hw, SRCCA+idx*0x100, ctl->ca);
		ctl->dirty.bf.ca = 0;
	}

	/* Write srccf register (always zeroed, regardless of dirty state) */
	hw_write_20kx(hw, SRCCF+idx*0x100, 0x0);

	if (ctl->dirty.bf.ccr) {
		hw_write_20kx(hw, SRCCCR+idx*0x100, ctl->ccr);
		ctl->dirty.bf.ccr = 0;
	}
	/* SRCCTL goes last so the SRC only starts once it is fully set up */
	if (ctl->dirty.bf.ctl) {
		hw_write_20kx(hw, SRCCTL+idx*0x100, ctl->ctl);
		ctl->dirty.bf.ctl = 0;
	}

	return 0;
}
425
426static int src_get_ca(struct hw *hw, unsigned int idx, void *blk)
427{
428 struct src_rsc_ctrl_blk *ctl = blk;
429
430 ctl->ca = hw_read_20kx(hw, SRCCA+idx*0x100);
431 ctl->dirty.bf.ca = 0;
432
433 return get_field(ctl->ca, SRCCA_CA);
434}
435
436static unsigned int src_get_dirty(void *blk)
437{
438 return ((struct src_rsc_ctrl_blk *)blk)->dirty.data;
439}
440
/* Dirty bits (MPR only) that must also be flushed on conjugate SRCs. */
static unsigned int src_dirty_conj_mask(void)
{
	return 0x20;
}
445
446static int src_mgr_enbs_src(void *blk, unsigned int idx)
447{
448 ((struct src_mgr_ctrl_blk *)blk)->enbsa = ~(0x0);
449 ((struct src_mgr_ctrl_blk *)blk)->dirty.bf.enbsa = 1;
450 ((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
451 return 0;
452}
453
454static int src_mgr_enb_src(void *blk, unsigned int idx)
455{
456 ((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
457 ((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
458 return 0;
459}
460
461static int src_mgr_dsb_src(void *blk, unsigned int idx)
462{
463 ((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] &= ~(0x1 << (idx%32));
464 ((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
465 return 0;
466}
467
/*
 * Flush pending SRC manager state: first the synchronous enable
 * request (after polling SRCENBSTAT until the engine is idle), then
 * every dirty bank of the SRCENB enable registers.
 */
static int src_mgr_commit_write(struct hw *hw, void *blk)
{
	struct src_mgr_ctrl_blk *ctl = blk;
	int i;
	unsigned int ret;

	if (ctl->dirty.bf.enbsa) {
		/* NOTE(review): unbounded busy-wait; relies on the hardware
		 * clearing SRCENBSTAT bit 0 promptly. */
		do {
			ret = hw_read_20kx(hw, SRCENBSTAT);
		} while (ret & 0x1);
		hw_write_20kx(hw, SRCENBS, ctl->enbsa);
		ctl->dirty.bf.enbsa = 0;
	}
	for (i = 0; i < 8; i++) {
		if ((ctl->dirty.data & (0x1 << i))) {
			hw_write_20kx(hw, SRCENB+(i*0x100), ctl->enb[i]);
			ctl->dirty.data &= ~(0x1 << i);
		}
	}

	return 0;
}
490
491static int src_mgr_get_ctrl_blk(void **rblk)
492{
493 struct src_mgr_ctrl_blk *blk;
494
495 *rblk = NULL;
496 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
497 if (NULL == blk)
498 return -ENOMEM;
499
500 *rblk = blk;
501
502 return 0;
503}
504
/* Release a block obtained from src_mgr_get_ctrl_blk(). */
static int src_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
511
512static int srcimp_mgr_get_ctrl_blk(void **rblk)
513{
514 struct srcimp_mgr_ctrl_blk *blk;
515
516 *rblk = NULL;
517 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
518 if (NULL == blk)
519 return -ENOMEM;
520
521 *rblk = blk;
522
523 return 0;
524}
525
/* Release a block obtained from srcimp_mgr_get_ctrl_blk(). */
static int srcimp_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
532
533static int srcimp_mgr_set_imaparc(void *blk, unsigned int slot)
534{
535 struct srcimp_mgr_ctrl_blk *ctl = blk;
536
537 set_field(&ctl->srcimap.srcaim, SRCAIM_ARC, slot);
538 ctl->dirty.bf.srcimap = 1;
539 return 0;
540}
541
542static int srcimp_mgr_set_imapuser(void *blk, unsigned int user)
543{
544 struct srcimp_mgr_ctrl_blk *ctl = blk;
545
546 set_field(&ctl->srcimap.srcaim, SRCAIM_SRC, user);
547 ctl->dirty.bf.srcimap = 1;
548 return 0;
549}
550
551static int srcimp_mgr_set_imapnxt(void *blk, unsigned int next)
552{
553 struct srcimp_mgr_ctrl_blk *ctl = blk;
554
555 set_field(&ctl->srcimap.srcaim, SRCAIM_NXT, next);
556 ctl->dirty.bf.srcimap = 1;
557 return 0;
558}
559
560static int srcimp_mgr_set_imapaddr(void *blk, unsigned int addr)
561{
562 struct srcimp_mgr_ctrl_blk *ctl = blk;
563
564 ctl->srcimap.idx = addr;
565 ctl->dirty.bf.srcimap = 1;
566 return 0;
567}
568
/*
 * Write the pending input-mapper entry to its SRCIMAP slot (stride
 * 0x100 per slot) and clear the dirty flag.  Always returns 0.
 */
static int srcimp_mgr_commit_write(struct hw *hw, void *blk)
{
	struct srcimp_mgr_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.srcimap) {
		hw_write_20kx(hw, SRCIMAP+ctl->srcimap.idx*0x100,
						ctl->srcimap.srcaim);
		ctl->dirty.bf.srcimap = 0;
	}

	return 0;
}
581
582/*
583 * AMIXER control block definitions.
584 */
585
586#define AMOPLO_M 0x00000003
587#define AMOPLO_X 0x0003FFF0
588#define AMOPLO_Y 0xFFFC0000
589
590#define AMOPHI_SADR 0x000000FF
591#define AMOPHI_SE 0x80000000
592
593/* AMIXER resource register dirty flags */
union amixer_dirty {
	struct {
		u16 amoplo:1;	/* AMOPLO register image needs flushing */
		u16 amophi:1;	/* AMOPHI register image needs flushing */
		u16 rsv:14;	/* reserved */
	} bf;
	u16 data;		/* all dirty flags as one word */
};
602
603/* AMIXER resource control block */
/* AMIXER resource control block */
struct amixer_rsc_ctrl_blk {
	unsigned int amoplo;		/* cached AMOPLO (M/X/Y fields) */
	unsigned int amophi;		/* cached AMOPHI (SADR/SE fields) */
	union amixer_dirty dirty;	/* which caches need committing */
};
609
610static int amixer_set_mode(void *blk, unsigned int mode)
611{
612 struct amixer_rsc_ctrl_blk *ctl = blk;
613
614 set_field(&ctl->amoplo, AMOPLO_M, mode);
615 ctl->dirty.bf.amoplo = 1;
616 return 0;
617}
618
/* The 20k1 AMIXER has no IV field; accept and ignore the request. */
static int amixer_set_iv(void *blk, unsigned int iv)
{
	return 0;
}
624
625static int amixer_set_x(void *blk, unsigned int x)
626{
627 struct amixer_rsc_ctrl_blk *ctl = blk;
628
629 set_field(&ctl->amoplo, AMOPLO_X, x);
630 ctl->dirty.bf.amoplo = 1;
631 return 0;
632}
633
634static int amixer_set_y(void *blk, unsigned int y)
635{
636 struct amixer_rsc_ctrl_blk *ctl = blk;
637
638 set_field(&ctl->amoplo, AMOPLO_Y, y);
639 ctl->dirty.bf.amoplo = 1;
640 return 0;
641}
642
643static int amixer_set_sadr(void *blk, unsigned int sadr)
644{
645 struct amixer_rsc_ctrl_blk *ctl = blk;
646
647 set_field(&ctl->amophi, AMOPHI_SADR, sadr);
648 ctl->dirty.bf.amophi = 1;
649 return 0;
650}
651
652static int amixer_set_se(void *blk, unsigned int se)
653{
654 struct amixer_rsc_ctrl_blk *ctl = blk;
655
656 set_field(&ctl->amophi, AMOPHI_SE, se);
657 ctl->dirty.bf.amophi = 1;
658 return 0;
659}
660
661static int amixer_set_dirty(void *blk, unsigned int flags)
662{
663 ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
664 return 0;
665}
666
667static int amixer_set_dirty_all(void *blk)
668{
669 ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
670 return 0;
671}
672
/*
 * Flush the cached AMOPLO/AMOPHI pair for amixer 'idx' (register
 * stride 8).  If either half is dirty, both registers are written
 * together and both dirty bits are cleared.  Always returns 0.
 */
static int amixer_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct amixer_rsc_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.amoplo || ctl->dirty.bf.amophi) {
		hw_write_20kx(hw, AMOPLO+idx*8, ctl->amoplo);
		ctl->dirty.bf.amoplo = 0;
		hw_write_20kx(hw, AMOPHI+idx*8, ctl->amophi);
		ctl->dirty.bf.amophi = 0;
	}

	return 0;
}
686
687static int amixer_get_y(void *blk)
688{
689 struct amixer_rsc_ctrl_blk *ctl = blk;
690
691 return get_field(ctl->amoplo, AMOPLO_Y);
692}
693
694static unsigned int amixer_get_dirty(void *blk)
695{
696 return ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data;
697}
698
699static int amixer_rsc_get_ctrl_blk(void **rblk)
700{
701 struct amixer_rsc_ctrl_blk *blk;
702
703 *rblk = NULL;
704 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
705 if (NULL == blk)
706 return -ENOMEM;
707
708 *rblk = blk;
709
710 return 0;
711}
712
/* Release a block obtained from amixer_rsc_get_ctrl_blk(). */
static int amixer_rsc_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
719
720static int amixer_mgr_get_ctrl_blk(void **rblk)
721{
722 /*amixer_mgr_ctrl_blk_t *blk;*/
723
724 *rblk = NULL;
725 /*blk = kzalloc(sizeof(*blk), GFP_KERNEL);
726 if (NULL == blk)
727 return -ENOMEM;
728
729 *rblk = blk;*/
730
731 return 0;
732}
733
/*
 * Counterpart of amixer_mgr_get_ctrl_blk(): nothing was allocated, so
 * there is nothing to free.  (Dead commented-out kfree removed.)
 */
static int amixer_mgr_put_ctrl_blk(void *blk)
{
	return 0;
}
740
741/*
742 * DAIO control block definitions.
743 */
744
745/* Receiver Sample Rate Tracker Control register */
746#define SRTCTL_SRCR 0x000000FF
747#define SRTCTL_SRCL 0x0000FF00
748#define SRTCTL_RSR 0x00030000
749#define SRTCTL_DRAT 0x000C0000
750#define SRTCTL_RLE 0x10000000
751#define SRTCTL_RLP 0x20000000
752#define SRTCTL_EC 0x40000000
753#define SRTCTL_ET 0x80000000
754
755/* DAIO Receiver register dirty flags */
union dai_dirty {
	struct {
		u16 srtctl:1;	/* SRT control image needs flushing */
		u16 rsv:15;	/* reserved */
	} bf;
	u16 data;		/* all dirty flags as one word */
};
763
764/* DAIO Receiver control block */
/* DAIO Receiver control block */
struct dai_ctrl_blk {
	unsigned int srtctl;	/* cached sample-rate-tracker control value */
	union dai_dirty dirty;	/* commit-pending flags */
};
769
770/* S/PDIF Transmitter register dirty flags */
union dao_dirty {
	struct {
		u16 spos:1;	/* S/PDIF channel status needs flushing */
		u16 rsv:15;	/* reserved */
	} bf;
	u16 data;		/* all dirty flags as one word */
};
778
779/* S/PDIF Transmitter control block */
/* S/PDIF Transmitter control block */
struct dao_ctrl_blk {
	unsigned int spos; /* S/PDIF Output Channel Status Register */
	union dao_dirty dirty;	/* commit-pending flags */
};
784
785/* Audio Input Mapper RAM */
786#define AIM_ARC 0x00000FFF
787#define AIM_NXT 0x007F0000
788
/* One audio input mapper RAM entry: packed value plus its table slot */
struct daoimap {
	unsigned int aim;	/* packed AIM_ARC/AIM_NXT fields */
	unsigned int idx;	/* DAOIMAP table slot to program */
};
793
794/* I2S Transmitter/Receiver Control register */
795#define I2SCTL_EA 0x00000004
796#define I2SCTL_EI 0x00000010
797
798/* S/PDIF Transmitter Control register */
799#define SPOCTL_OE 0x00000001
800#define SPOCTL_OS 0x0000000E
801#define SPOCTL_RIV 0x00000010
802#define SPOCTL_LIV 0x00000020
803#define SPOCTL_SR 0x000000C0
804
805/* S/PDIF Receiver Control register */
806#define SPICTL_EN 0x00000001
807#define SPICTL_I24 0x00000002
808#define SPICTL_IB 0x00000004
809#define SPICTL_SM 0x00000008
810#define SPICTL_VM 0x00000010
811
812/* DAIO manager register dirty flags */
union daio_mgr_dirty {
	struct {
		u32 i2soctl:4;	/* per-channel I2S output ctl dirty bits */
		u32 i2sictl:4;	/* per-channel I2S input ctl dirty bits */
		u32 spoctl:4;	/* per-channel S/PDIF out ctl dirty bits */
		u32 spictl:4;	/* per-channel S/PDIF in ctl dirty bits */
		u32 daoimap:1;	/* pending DAOIMAP entry needs writing */
		u32 rsv:15;	/* reserved */
	} bf;
	u32 data;		/* all dirty flags as one word */
};
824
825/* DAIO manager control block */
/* DAIO manager control block */
struct daio_mgr_ctrl_blk {
	unsigned int i2sctl;		/* cached I2SCTL register image */
	unsigned int spoctl;		/* cached SPOCTL register image */
	unsigned int spictl;		/* cached SPICTL register image */
	struct daoimap daoimap;		/* pending input mapper entry */
	union daio_mgr_dirty dirty;	/* what needs committing */
};
833
834static int dai_srt_set_srcr(void *blk, unsigned int src)
835{
836 struct dai_ctrl_blk *ctl = blk;
837
838 set_field(&ctl->srtctl, SRTCTL_SRCR, src);
839 ctl->dirty.bf.srtctl = 1;
840 return 0;
841}
842
843static int dai_srt_set_srcl(void *blk, unsigned int src)
844{
845 struct dai_ctrl_blk *ctl = blk;
846
847 set_field(&ctl->srtctl, SRTCTL_SRCL, src);
848 ctl->dirty.bf.srtctl = 1;
849 return 0;
850}
851
852static int dai_srt_set_rsr(void *blk, unsigned int rsr)
853{
854 struct dai_ctrl_blk *ctl = blk;
855
856 set_field(&ctl->srtctl, SRTCTL_RSR, rsr);
857 ctl->dirty.bf.srtctl = 1;
858 return 0;
859}
860
861static int dai_srt_set_drat(void *blk, unsigned int drat)
862{
863 struct dai_ctrl_blk *ctl = blk;
864
865 set_field(&ctl->srtctl, SRTCTL_DRAT, drat);
866 ctl->dirty.bf.srtctl = 1;
867 return 0;
868}
869
870static int dai_srt_set_ec(void *blk, unsigned int ec)
871{
872 struct dai_ctrl_blk *ctl = blk;
873
874 set_field(&ctl->srtctl, SRTCTL_EC, ec ? 1 : 0);
875 ctl->dirty.bf.srtctl = 1;
876 return 0;
877}
878
879static int dai_srt_set_et(void *blk, unsigned int et)
880{
881 struct dai_ctrl_blk *ctl = blk;
882
883 set_field(&ctl->srtctl, SRTCTL_ET, et ? 1 : 0);
884 ctl->dirty.bf.srtctl = 1;
885 return 0;
886}
887
/*
 * Flush the cached SRT control value for receiver 'idx'.  Indexes 0-3
 * map to the per-channel S/PDIF trackers (SRTSCTL, stride 4); any
 * higher index targets the single I2S tracker (SRTICTL).
 */
static int dai_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct dai_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.srtctl) {
		if (idx < 4) {
			/* S/PDIF SRTs */
			hw_write_20kx(hw, SRTSCTL+0x4*idx, ctl->srtctl);
		} else {
			/* I2S SRT */
			hw_write_20kx(hw, SRTICTL, ctl->srtctl);
		}
		ctl->dirty.bf.srtctl = 0;
	}

	return 0;
}
905
906static int dai_get_ctrl_blk(void **rblk)
907{
908 struct dai_ctrl_blk *blk;
909
910 *rblk = NULL;
911 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
912 if (NULL == blk)
913 return -ENOMEM;
914
915 *rblk = blk;
916
917 return 0;
918}
919
/* Release a block obtained from dai_get_ctrl_blk(). */
static int dai_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
926
927static int dao_set_spos(void *blk, unsigned int spos)
928{
929 ((struct dao_ctrl_blk *)blk)->spos = spos;
930 ((struct dao_ctrl_blk *)blk)->dirty.bf.spos = 1;
931 return 0;
932}
933
/*
 * Flush the cached channel status word for S/PDIF output 'idx'
 * (SPOS registers, stride 4; only indexes 0-3 exist in hardware).
 * NOTE(review): the dirty flag is cleared even when idx >= 4 and
 * nothing is written — confirm that is intended.
 */
static int dao_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct dao_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.spos) {
		if (idx < 4) {
			/* S/PDIF SPOSx */
			hw_write_20kx(hw, SPOS+0x4*idx, ctl->spos);
		}
		ctl->dirty.bf.spos = 0;
	}

	return 0;
}
948
949static int dao_get_spos(void *blk, unsigned int *spos)
950{
951 *spos = ((struct dao_ctrl_blk *)blk)->spos;
952 return 0;
953}
954
955static int dao_get_ctrl_blk(void **rblk)
956{
957 struct dao_ctrl_blk *blk;
958
959 *rblk = NULL;
960 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
961 if (NULL == blk)
962 return -ENOMEM;
963
964 *rblk = blk;
965
966 return 0;
967}
968
/* Release a block obtained from dao_get_ctrl_blk(). */
static int dao_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
975
/*
 * Enable DAI (input) channel 'idx': 0-3 are the S/PDIF receivers,
 * 4-7 the I2S receivers.  Only the cached register image and dirty
 * bits are updated; daio_mgr_commit_write() pushes them to hardware.
 */
static int daio_mgr_enb_dai(void *blk, unsigned int idx)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF input */
		set_field(&ctl->spictl, SPICTL_EN << (idx*8), 1);
		ctl->dirty.bf.spictl |= (0x1 << idx);
	} else {
		/* I2S input */
		idx %= 4;
		set_field(&ctl->i2sctl, I2SCTL_EI << (idx*8), 1);
		ctl->dirty.bf.i2sictl |= (0x1 << idx);
	}
	return 0;
}
992
/*
 * Disable DAI (input) channel 'idx': 0-3 are the S/PDIF receivers,
 * 4-7 the I2S receivers.  Mirror image of daio_mgr_enb_dai().
 */
static int daio_mgr_dsb_dai(void *blk, unsigned int idx)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF input */
		set_field(&ctl->spictl, SPICTL_EN << (idx*8), 0);
		ctl->dirty.bf.spictl |= (0x1 << idx);
	} else {
		/* I2S input */
		idx %= 4;
		set_field(&ctl->i2sctl, I2SCTL_EI << (idx*8), 0);
		ctl->dirty.bf.i2sictl |= (0x1 << idx);
	}
	return 0;
}
1009
/*
 * Enable DAO (output) channel 'idx': 0-3 are the S/PDIF transmitters,
 * 4-7 the I2S outputs.  Updates the cached images/dirty bits only.
 */
static int daio_mgr_enb_dao(void *blk, unsigned int idx)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF output */
		set_field(&ctl->spoctl, SPOCTL_OE << (idx*8), 1);
		ctl->dirty.bf.spoctl |= (0x1 << idx);
	} else {
		/* I2S output */
		idx %= 4;
		set_field(&ctl->i2sctl, I2SCTL_EA << (idx*8), 1);
		ctl->dirty.bf.i2soctl |= (0x1 << idx);
	}
	return 0;
}
1026
/*
 * Disable DAO (output) channel 'idx': 0-3 are the S/PDIF transmitters,
 * 4-7 the I2S outputs.  Mirror image of daio_mgr_enb_dao().
 */
static int daio_mgr_dsb_dao(void *blk, unsigned int idx)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF output */
		set_field(&ctl->spoctl, SPOCTL_OE << (idx*8), 0);
		ctl->dirty.bf.spoctl |= (0x1 << idx);
	} else {
		/* I2S output */
		idx %= 4;
		set_field(&ctl->i2sctl, I2SCTL_EA << (idx*8), 0);
		ctl->dirty.bf.i2soctl |= (0x1 << idx);
	}
	return 0;
}
1043
/*
 * Configure S/PDIF output 'idx' from the packed 'conf' word:
 * bits 0-2 select the sample-rate code, bit 4 the non-audio (validity)
 * flags.  I2S outputs (idx >= 4) currently need no per-channel init.
 */
static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF output */
		switch ((conf & 0x7)) {
		case 0:
			set_field(&ctl->spoctl, SPOCTL_SR << (idx*8), 3);
			break; /* CDIF */
		case 1:
			set_field(&ctl->spoctl, SPOCTL_SR << (idx*8), 0);
			break;
		case 2:
			set_field(&ctl->spoctl, SPOCTL_SR << (idx*8), 1);
			break;
		case 4:
			set_field(&ctl->spoctl, SPOCTL_SR << (idx*8), 2);
			break;
		default:
			break;
		}
		set_field(&ctl->spoctl, SPOCTL_LIV << (idx*8),
			  (conf >> 4) & 0x1); /* Non-audio */
		set_field(&ctl->spoctl, SPOCTL_RIV << (idx*8),
			  (conf >> 4) & 0x1); /* Non-audio */
		/* NOTE(review): both arms of the conditional below are 2, so
		 * OS is always programmed to 2 and (conf >> 3) has no effect
		 * here — confirm whether a different value was intended. */
		set_field(&ctl->spoctl, SPOCTL_OS << (idx*8),
			  ((conf >> 3) & 0x1) ? 2 : 2); /* Raw */

		ctl->dirty.bf.spoctl |= (0x1 << idx);
	} else {
		/* I2S output */
		/*idx %= 4; */
	}
	return 0;
}
1080
1081static int daio_mgr_set_imaparc(void *blk, unsigned int slot)
1082{
1083 struct daio_mgr_ctrl_blk *ctl = blk;
1084
1085 set_field(&ctl->daoimap.aim, AIM_ARC, slot);
1086 ctl->dirty.bf.daoimap = 1;
1087 return 0;
1088}
1089
1090static int daio_mgr_set_imapnxt(void *blk, unsigned int next)
1091{
1092 struct daio_mgr_ctrl_blk *ctl = blk;
1093
1094 set_field(&ctl->daoimap.aim, AIM_NXT, next);
1095 ctl->dirty.bf.daoimap = 1;
1096 return 0;
1097}
1098
1099static int daio_mgr_set_imapaddr(void *blk, unsigned int addr)
1100{
1101 struct daio_mgr_ctrl_blk *ctl = blk;
1102
1103 ctl->daoimap.idx = addr;
1104 ctl->dirty.bf.daoimap = 1;
1105 return 0;
1106}
1107
/*
 * Commit all dirty DAIO manager state.  For each of I2SCTL, SPOCTL and
 * SPICTL, the per-channel dirty bits are cleared first and the whole
 * cached register is then written once, followed by a 1ms settle delay.
 * Finally a pending input-mapper entry is written to DAOIMAP (stride 4).
 */
static int daio_mgr_commit_write(struct hw *hw, void *blk)
{
	struct daio_mgr_ctrl_blk *ctl = blk;
	int i;

	if (ctl->dirty.bf.i2sictl || ctl->dirty.bf.i2soctl) {
		for (i = 0; i < 4; i++) {
			if ((ctl->dirty.bf.i2sictl & (0x1 << i)))
				ctl->dirty.bf.i2sictl &= ~(0x1 << i);

			if ((ctl->dirty.bf.i2soctl & (0x1 << i)))
				ctl->dirty.bf.i2soctl &= ~(0x1 << i);
		}
		hw_write_20kx(hw, I2SCTL, ctl->i2sctl);
		mdelay(1);
	}
	if (ctl->dirty.bf.spoctl) {
		for (i = 0; i < 4; i++) {
			if ((ctl->dirty.bf.spoctl & (0x1 << i)))
				ctl->dirty.bf.spoctl &= ~(0x1 << i);
		}
		hw_write_20kx(hw, SPOCTL, ctl->spoctl);
		mdelay(1);
	}
	if (ctl->dirty.bf.spictl) {
		for (i = 0; i < 4; i++) {
			if ((ctl->dirty.bf.spictl & (0x1 << i)))
				ctl->dirty.bf.spictl &= ~(0x1 << i);
		}
		hw_write_20kx(hw, SPICTL, ctl->spictl);
		mdelay(1);
	}
	if (ctl->dirty.bf.daoimap) {
		hw_write_20kx(hw, DAOIMAP+ctl->daoimap.idx*4,
					ctl->daoimap.aim);
		ctl->dirty.bf.daoimap = 0;
	}

	return 0;
}
1148
1149static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
1150{
1151 struct daio_mgr_ctrl_blk *blk;
1152
1153 *rblk = NULL;
1154 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
1155 if (NULL == blk)
1156 return -ENOMEM;
1157
1158 blk->i2sctl = hw_read_20kx(hw, I2SCTL);
1159 blk->spoctl = hw_read_20kx(hw, SPOCTL);
1160 blk->spictl = hw_read_20kx(hw, SPICTL);
1161
1162 *rblk = blk;
1163
1164 return 0;
1165}
1166
/* Release a block obtained from daio_mgr_get_ctrl_blk(). */
static int daio_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
1173
1174/* Timer interrupt */
1175static int set_timer_irq(struct hw *hw, int enable)
1176{
1177 hw_write_20kx(hw, GIE, enable ? IT_INT : 0);
1178 return 0;
1179}
1180
1181static int set_timer_tick(struct hw *hw, unsigned int ticks)
1182{
1183 if (ticks)
1184 ticks |= TIMR_IE | TIMR_IP;
1185 hw_write_20kx(hw, TIMR, ticks);
1186 return 0;
1187}
1188
/* Read the current wall clock counter (WC register) from the hardware. */
static unsigned int get_wc(struct hw *hw)
{
	return hw_read_20kx(hw, WC);
}
1193
1194/* Card hardware initialization block */
/* DAC configuration passed to hw_dac_init() */
struct dac_conf {
	unsigned int msr; /* master sample rate in rsrs */
};
1198
/* ADC configuration passed to the adc_input_select_* helpers */
struct adc_conf {
	unsigned int msr; /* master sample rate in rsrs */
	unsigned char input; /* the input source of ADC */
	unsigned char mic20db; /* boost mic by 20db if input is microphone */
};
1204
/* DAIO configuration passed to hw_daio_init() */
struct daio_conf {
	unsigned int msr; /* master sample rate in rsrs */
};
1208
/* Transport configuration passed to hw_trn_init() */
struct trn_conf {
	unsigned long vm_pgt_phys;	/* physical address of the device page table */
};
1212
/*
 * Program the DAIO (I2S and S/PDIF) interfaces for the requested
 * master sample rate.  I2S outputs and the I2S-D input are enabled
 * with a hard-coded control word, the S/PDIF transmitter is set to
 * 24-bit data, and both blocks get a rate code derived from
 * info->msr (1x/2x/4x).  Always returns 0.
 */
static int hw_daio_init(struct hw *hw, const struct daio_conf *info)
{
	u32 i2sorg;
	u32 spdorg;

	/* Read I2S CTL. Keep original value. */
	/*i2sorg = hw_read_20kx(hw, I2SCTL);*/
	i2sorg = 0x94040404; /* enable all audio out and I2S-D input */
	/* Program I2S with proper master sample rate and enable
	 * the correct I2S channel. */
	i2sorg &= 0xfffffffc;	/* clear the rate-select bits first */

	/* Enable S/PDIF-out-A in fixed 24-bit data
	 * format and default to 48kHz. */
	/* Disable all before doing any changes. */
	hw_write_20kx(hw, SPOCTL, 0x0);
	spdorg = 0x05;

	switch (info->msr) {
	case 1:
		i2sorg |= 1;
		spdorg |= (0x0 << 6);
		break;
	case 2:
		i2sorg |= 2;
		spdorg |= (0x1 << 6);
		break;
	case 4:
		i2sorg |= 3;
		spdorg |= (0x2 << 6);
		break;
	default:
		i2sorg |= 1;
		break;
	}

	hw_write_20kx(hw, I2SCTL, i2sorg);
	hw_write_20kx(hw, SPOCTL, spdorg);

	/* Enable S/PDIF-in-A in fixed 24-bit data format. */
	/* Disable all before doing any changes. */
	hw_write_20kx(hw, SPICTL, 0x0);
	mdelay(1);
	spdorg = 0x0a0a0a0a;	/* same receiver setup for all four inputs */
	hw_write_20kx(hw, SPICTL, spdorg);
	mdelay(1);

	return 0;
}
1262
1263/* TRANSPORT operations */
/*
 * Set up the transport engine: point the hardware at the device page
 * table (split into low/high 32-bit halves) and enable it in 32-bit,
 * 4k-page mode, adding 64-bit addressing when the kernel runs with
 * 8-byte pointers.  Returns -1 if the page table address is invalid.
 */
static int hw_trn_init(struct hw *hw, const struct trn_conf *info)
{
	u32 trnctl;
	u32 ptp_phys_low, ptp_phys_high;

	/* Set up device page table */
	if ((~0UL) == info->vm_pgt_phys) {
		printk(KERN_ERR "Wrong device page table page address!\n");
		return -1;
	}

	trnctl = 0x13; /* 32-bit, 4k-size page */
	ptp_phys_low = (u32)info->vm_pgt_phys;
	ptp_phys_high = upper_32_bits(info->vm_pgt_phys);
	if (sizeof(void *) == 8) /* 64bit address */
		trnctl |= (1 << 2);
#if 0 /* Only 4k h/w pages for simplicity */
#if PAGE_SIZE == 8192
	trnctl |= (1<<5);
#endif
#endif
	hw_write_20kx(hw, PTPALX, ptp_phys_low);
	hw_write_20kx(hw, PTPAHX, ptp_phys_high);
	hw_write_20kx(hw, TRNCTL, trnctl);
	hw_write_20kx(hw, TRNIS, 0x200c01); /* really needed? */

	return 0;
}
1292
1293/* Card initialization */
1294#define GCTL_EAC 0x00000001
1295#define GCTL_EAI 0x00000002
1296#define GCTL_BEP 0x00000004
1297#define GCTL_BES 0x00000008
1298#define GCTL_DSP 0x00000010
1299#define GCTL_DBP 0x00000020
1300#define GCTL_ABP 0x00000040
1301#define GCTL_TBP 0x00000080
1302#define GCTL_SBP 0x00000100
1303#define GCTL_FBP 0x00000200
1304#define GCTL_XA 0x00000400
1305#define GCTL_ET 0x00000800
1306#define GCTL_PR 0x00001000
1307#define GCTL_MRL 0x00002000
1308#define GCTL_SDE 0x00004000
1309#define GCTL_SDI 0x00008000
1310#define GCTL_SM 0x00010000
1311#define GCTL_SR 0x00020000
1312#define GCTL_SD 0x00040000
1313#define GCTL_SE 0x00080000
1314#define GCTL_AID 0x00100000
1315
/*
 * Bring up the PLL for the given base rate (one magic value for the
 * 48kHz family, another otherwise).  Up to three attempts, 40ms apart,
 * each verified by reading PLLCTL back.  Returns -EBUSY if the PLL
 * never latches the requested value.
 */
static int hw_pll_init(struct hw *hw, unsigned int rsr)
{
	unsigned int pllctl;
	int i;

	pllctl = (48000 == rsr) ? 0x1480a001 : 0x1480a731;
	for (i = 0; i < 3; i++) {
		if (hw_read_20kx(hw, PLLCTL) == pllctl)
			break;

		hw_write_20kx(hw, PLLCTL, pllctl);
		mdelay(40);
	}
	if (i >= 3) {
		printk(KERN_ALERT "PLL initialization failed!!!\n");
		return -EBUSY;
	}

	return 0;
}
1336
/*
 * Trigger the card's auto-initialization by pulsing GCTL_EAI low then
 * high, then poll GCTL_AID (up to 400000 reads) until the hardware
 * reports init done.  Returns -EBUSY on timeout.
 */
static int hw_auto_init(struct hw *hw)
{
	unsigned int gctl;
	int i;

	gctl = hw_read_20kx(hw, GCTL);
	set_field(&gctl, GCTL_EAI, 0);
	hw_write_20kx(hw, GCTL, gctl);
	set_field(&gctl, GCTL_EAI, 1);
	hw_write_20kx(hw, GCTL, gctl);
	mdelay(10);
	for (i = 0; i < 400000; i++) {
		gctl = hw_read_20kx(hw, GCTL);
		if (get_field(gctl, GCTL_AID))
			break;
	}
	if (!get_field(gctl, GCTL_AID)) {
		printk(KERN_ALERT "Card Auto-init failed!!!\n");
		return -EBUSY;
	}

	return 0;
}
1360
/*
 * Unlock access to the on-board I2C controller through PCI config
 * register 0xcc.  The register reads back 0xaa when unlocked; two
 * different magic byte sequences are tried in turn.  Returns 0 on
 * success, -1 if the controller stays locked.
 */
static int i2c_unlock(struct hw *hw)
{
	if ((hw_read_pci(hw, 0xcc) & 0xff) == 0xaa)
		return 0;

	hw_write_pci(hw, 0xcc, 0x8c);
	hw_write_pci(hw, 0xcc, 0x0e);
	if ((hw_read_pci(hw, 0xcc) & 0xff) == 0xaa)
		return 0;

	hw_write_pci(hw, 0xcc, 0xee);
	hw_write_pci(hw, 0xcc, 0xaa);
	if ((hw_read_pci(hw, 0xcc) & 0xff) == 0xaa)
		return 0;

	return -1;
}
1378
/* Re-lock the I2C controller if it is currently unlocked (reads 0xaa). */
static void i2c_lock(struct hw *hw)
{
	if ((hw_read_pci(hw, 0xcc) & 0xff) == 0xaa)
		hw_write_pci(hw, 0xcc, 0x00);
}
1384
/*
 * Write one byte to an I2C device register: wait for the controller
 * ready flag (bit 23 of PCI reg 0xEC), then issue the device address
 * followed by the packed data/register-address word.
 * NOTE(review): unbounded busy-wait on the ready flag.
 */
static void i2c_write(struct hw *hw, u32 device, u32 addr, u32 data)
{
	unsigned int ret;

	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000));
	hw_write_pci(hw, 0xE0, device);
	hw_write_pci(hw, 0xE4, (data << 8) | (addr & 0xff));
}
1395
1396/* DAC operations */
1397
/*
 * Reset the external DAC by pulsing GPIO1 low/high (twice, as the
 * hardware needs), then restore its I2C configuration registers.
 * Returns -1 if the I2C bus cannot be unlocked, 0 otherwise.
 */
static int hw_reset_dac(struct hw *hw)
{
	u32 i;
	u16 gpioorg;
	unsigned int ret;

	if (i2c_unlock(hw))
		return -1;

	/* Wait for the I2C controller to become ready (bit 23) */
	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000));
	hw_write_pci(hw, 0xEC, 0x05); /* write to i2c status control */

	/* To be effective, need to reset the DAC twice. */
	for (i = 0; i < 2; i++) {
		/* set gpio */
		mdelay(100);
		gpioorg = (u16)hw_read_20kx(hw, GPIO);
		gpioorg &= 0xfffd;	/* drive GPIO1 low */
		hw_write_20kx(hw, GPIO, gpioorg);
		mdelay(1);
		hw_write_20kx(hw, GPIO, gpioorg | 0x2);	/* release reset */
	}

	i2c_write(hw, 0x00180080, 0x01, 0x80);
	i2c_write(hw, 0x00180080, 0x02, 0x10);

	i2c_lock(hw);

	return 0;
}
1430
/*
 * Initialize the external DAC for the configured master sample rate.
 * SB055x only needs its outputs unmuted via GPIO; other models are
 * muted, hard-reset, reprogrammed over I2C (one rate byte into DAC
 * registers 0x06/0x09/0x0c/0x0f) and unmuted again.
 * Returns -1 if the I2C bus cannot be unlocked.
 */
static int hw_dac_init(struct hw *hw, const struct dac_conf *info)
{
	u32 data;
	u16 gpioorg;
	unsigned int ret;

	if (hw->model == CTSB055X) {
		/* SB055x, unmute outputs */
		gpioorg = (u16)hw_read_20kx(hw, GPIO);
		gpioorg &= 0xffbf; /* set GPIO6 to low */
		gpioorg |= 2; /* set GPIO1 to high */
		hw_write_20kx(hw, GPIO, gpioorg);
		return 0;
	}

	/* mute outputs */
	gpioorg = (u16)hw_read_20kx(hw, GPIO);
	gpioorg &= 0xffbf;
	hw_write_20kx(hw, GPIO, gpioorg);

	hw_reset_dac(hw);

	if (i2c_unlock(hw))
		return -1;

	hw_write_pci(hw, 0xEC, 0x05); /* write to i2c status control */
	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000));	/* wait until controller ready */

	/* Pick the DAC rate byte for the 1x/2x/4x master sample rate */
	switch (info->msr) {
	case 1:
		data = 0x24;
		break;
	case 2:
		data = 0x25;
		break;
	case 4:
		data = 0x26;
		break;
	default:
		data = 0x24;
		break;
	}

	i2c_write(hw, 0x00180080, 0x06, data);
	i2c_write(hw, 0x00180080, 0x09, data);
	i2c_write(hw, 0x00180080, 0x0c, data);
	i2c_write(hw, 0x00180080, 0x0f, data);

	i2c_lock(hw);

	/* unmute outputs */
	gpioorg = (u16)hw_read_20kx(hw, GPIO);
	gpioorg = gpioorg | 0x40;
	hw_write_20kx(hw, GPIO, gpioorg);

	return 0;
}
1490
1491/* ADC operations */
1492
/* SB055x: always reports 0; presumably the selection cannot be queried
 * on this model — confirm against the select path. */
static int is_adc_input_selected_SB055x(struct hw *hw, enum ADCSRC type)
{
	return 0;
}
1497
/*
 * Decode the ADC input selection from GPIO on plain SB cards:
 * GPIO8 low means digital I/O is selected; otherwise GPIO7 high
 * selects mic, low selects line-in.  Returns non-zero when 'type'
 * matches the currently selected source.
 */
static int is_adc_input_selected_SBx(struct hw *hw, enum ADCSRC type)
{
	u32 data;

	data = hw_read_20kx(hw, GPIO);
	switch (type) {
	case ADC_MICIN:
		data = ((data & (0x1<<7)) && (data & (0x1<<8)));
		break;
	case ADC_LINEIN:
		data = (!(data & (0x1<<7)) && (data & (0x1<<8)));
		break;
	case ADC_NONE: /* Digital I/O */
		data = (!(data & (0x1<<8)));
		break;
	default:
		data = 0;
	}
	return data;
}
1518
/*
 * Hendrix models (SB073x/UAA): GPIO7 high selects mic, low selects
 * line-in.  Returns 1 when 'type' matches the selection, else 0.
 */
static int is_adc_input_selected_hendrix(struct hw *hw, enum ADCSRC type)
{
	u32 data;

	data = hw_read_20kx(hw, GPIO);
	switch (type) {
	case ADC_MICIN:
		data = (data & (0x1 << 7)) ? 1 : 0;
		break;
	case ADC_LINEIN:
		data = (data & (0x1 << 7)) ? 0 : 1;
		break;
	default:
		data = 0;
	}
	return data;
}
1536
1537static int hw_is_adc_input_selected(struct hw *hw, enum ADCSRC type)
1538{
1539 switch (hw->model) {
1540 case CTSB055X:
1541 return is_adc_input_selected_SB055x(hw, type);
1542 case CTSB073X:
1543 return is_adc_input_selected_hendrix(hw, type);
1544 case CTUAA:
1545 return is_adc_input_selected_hendrix(hw, type);
1546 default:
1547 return is_adc_input_selected_SBx(hw, type);
1548 }
1549}
1550
/*
 * Route the ADC input on SB055x purely via GPIO (no codec I2C is
 * involved on this model).  'boost' raises the ADC gain pin for the
 * mic path.  Returns -1 for an unsupported source type.
 */
static int
adc_input_select_SB055x(struct hw *hw, enum ADCSRC type, unsigned char boost)
{
	u32 data;

	/*
	 * check and set the following GPIO bits accordingly
	 * ADC_Gain		= GPIO2
	 * DRM_off		= GPIO3
	 * Mic_Pwr_on		= GPIO7
	 * Digital_IO_Sel	= GPIO8
	 * Mic_Sw		= GPIO9
	 * Aux/MicLine_Sw	= GPIO12
	 */
	data = hw_read_20kx(hw, GPIO);
	data &= 0xec73;		/* clear all the mux bits listed above */
	switch (type) {
	case ADC_MICIN:
		data |= (0x1<<7) | (0x1<<8) | (0x1<<9) ;
		data |= boost ? (0x1<<2) : 0;
		break;
	case ADC_LINEIN:
		data |= (0x1<<8);
		break;
	case ADC_AUX:
		data |= (0x1<<8) | (0x1<<12);
		break;
	case ADC_NONE:
		data |= (0x1<<12);  /* set to digital */
		break;
	default:
		return -1;
	}

	hw_write_20kx(hw, GPIO, data);

	return 0;
}
1589
1590
/*
 * Select the ADC capture source on standard SBx cards: program the GPIO
 * routing bits and the input-mux register (0x2a) of the i2c codec at
 * device id 0x001a0080, then write registers 0x1c/0x1e for +12dB boost
 * or no boost.
 *
 * Returns 0 on success, -1 if the i2c bus cannot be unlocked or @type
 * is not supported.
 */
static int
adc_input_select_SBx(struct hw *hw, enum ADCSRC type, unsigned char boost)
{
	u32 data;
	u32 i2c_data;
	unsigned int ret;

	if (i2c_unlock(hw))
		return -1;

	/* NOTE(review): unbounded poll - hangs if the i2c engine is dead. */
	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000)); /* i2c ready poll */
	/* set i2c access mode as Direct Control */
	hw_write_pci(hw, 0xEC, 0x05);

	data = hw_read_20kx(hw, GPIO);
	switch (type) {
	case ADC_MICIN:
		data |= ((0x1 << 7) | (0x1 << 8));
		i2c_data = 0x1; /* Mic-in */
		break;
	case ADC_LINEIN:
		data &= ~(0x1 << 7);
		data |= (0x1 << 8);
		i2c_data = 0x2; /* Line-in */
		break;
	case ADC_NONE:
		data &= ~(0x1 << 8);
		i2c_data = 0x0; /* set to Digital */
		break;
	default:
		/* unsupported source: relock the bus before bailing out */
		i2c_lock(hw);
		return -1;
	}
	hw_write_20kx(hw, GPIO, data);
	i2c_write(hw, 0x001a0080, 0x2a, i2c_data);
	if (boost) {
		i2c_write(hw, 0x001a0080, 0x1c, 0xe7); /* +12dB boost */
		i2c_write(hw, 0x001a0080, 0x1e, 0xe7); /* +12dB boost */
	} else {
		i2c_write(hw, 0x001a0080, 0x1c, 0xcf); /* No boost */
		i2c_write(hw, 0x001a0080, 0x1e, 0xcf); /* No boost */
	}

	i2c_lock(hw);

	return 0;
}
1640
/*
 * Select the ADC capture source on Hendrix-class (SB073x/UAA) cards.
 * Same i2c codec programming as adc_input_select_SBx(), but only GPIO
 * bit 7 is used for routing and only mic/line sources are supported.
 *
 * Returns 0 on success, -1 if the i2c bus cannot be unlocked or @type
 * is not mic/line.
 */
static int
adc_input_select_hendrix(struct hw *hw, enum ADCSRC type, unsigned char boost)
{
	u32 data;
	u32 i2c_data;
	unsigned int ret;

	if (i2c_unlock(hw))
		return -1;

	/* NOTE(review): unbounded poll - hangs if the i2c engine is dead. */
	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000)); /* i2c ready poll */
	/* set i2c access mode as Direct Control */
	hw_write_pci(hw, 0xEC, 0x05);

	data = hw_read_20kx(hw, GPIO);
	switch (type) {
	case ADC_MICIN:
		data |= (0x1 << 7);
		i2c_data = 0x1; /* Mic-in */
		break;
	case ADC_LINEIN:
		data &= ~(0x1 << 7);
		i2c_data = 0x2; /* Line-in */
		break;
	default:
		/* unsupported source: relock the bus before bailing out */
		i2c_lock(hw);
		return -1;
	}
	hw_write_20kx(hw, GPIO, data);
	i2c_write(hw, 0x001a0080, 0x2a, i2c_data);
	if (boost) {
		i2c_write(hw, 0x001a0080, 0x1c, 0xe7); /* +12dB boost */
		i2c_write(hw, 0x001a0080, 0x1e, 0xe7); /* +12dB boost */
	} else {
		i2c_write(hw, 0x001a0080, 0x1c, 0xcf); /* No boost */
		i2c_write(hw, 0x001a0080, 0x1e, 0xcf); /* No boost */
	}

	i2c_lock(hw);

	return 0;
}
1685
1686static int hw_adc_input_select(struct hw *hw, enum ADCSRC type)
1687{
1688 int state = type == ADC_MICIN;
1689
1690 switch (hw->model) {
1691 case CTSB055X:
1692 return adc_input_select_SB055x(hw, type, state);
1693 case CTSB073X:
1694 return adc_input_select_hendrix(hw, type, state);
1695 case CTUAA:
1696 return adc_input_select_hendrix(hw, type, state);
1697 default:
1698 return adc_input_select_SBx(hw, type, state);
1699 }
1700}
1701
/* SB055x ADC init is just an initial source selection via GPIO. */
static int adc_init_SB055x(struct hw *hw, int input, int mic20db)
{
	return adc_input_select_SB055x(hw, input, mic20db);
}
1706
/*
 * One-time ADC setup for SBx cards: choose the codec input-mux value
 * and the GPIO source bits for @input, program the i2c codec registers
 * (including the @mic20db gain setting), then update GPIO.
 *
 * Returns 0 on success, -1 if the i2c bus cannot be unlocked.
 */
static int adc_init_SBx(struct hw *hw, int input, int mic20db)
{
	u16 gpioorg;
	u16 input_source;
	u32 adcdata;
	unsigned int ret;

	input_source = 0x100; /* default to analog */
	switch (input) {
	case ADC_MICIN:
		adcdata = 0x1;
		input_source = 0x180; /* set GPIO7 to select Mic */
		break;
	case ADC_LINEIN:
		adcdata = 0x2;
		break;
	case ADC_VIDEO:
		adcdata = 0x4;
		break;
	case ADC_AUX:
		adcdata = 0x8;
		break;
	case ADC_NONE:
		adcdata = 0x0;
		input_source = 0x0; /* set to Digital */
		break;
	default:
		adcdata = 0x0;
		break;
	}

	if (i2c_unlock(hw))
		return -1;

	/* NOTE(review): unbounded poll - hangs if the i2c engine is dead. */
	do {
		ret = hw_read_pci(hw, 0xEC);
	} while (!(ret & 0x800000)); /* i2c ready poll */
	hw_write_pci(hw, 0xEC, 0x05); /* write to i2c status control */

	/* Codec register setup (device id 0x001a0080). */
	i2c_write(hw, 0x001a0080, 0x0e, 0x08);
	i2c_write(hw, 0x001a0080, 0x18, 0x0a);
	i2c_write(hw, 0x001a0080, 0x28, 0x86);
	i2c_write(hw, 0x001a0080, 0x2a, adcdata);

	if (mic20db) {
		i2c_write(hw, 0x001a0080, 0x1c, 0xf7);
		i2c_write(hw, 0x001a0080, 0x1e, 0xf7);
	} else {
		i2c_write(hw, 0x001a0080, 0x1c, 0xcf);
		i2c_write(hw, 0x001a0080, 0x1e, 0xcf);
	}

	/* Extra codec write for chips without bit 8 set in ID0. */
	if (!(hw_read_20kx(hw, ID0) & 0x100))
		i2c_write(hw, 0x001a0080, 0x16, 0x26);

	i2c_lock(hw);

	/* Finally point the GPIO mux at the chosen source. */
	gpioorg = (u16)hw_read_20kx(hw, GPIO);
	gpioorg &= 0xfe7f;
	gpioorg |= input_source;
	hw_write_20kx(hw, GPIO, gpioorg);

	return 0;
}
1771
1772static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
1773{
1774 if (hw->model == CTSB055X)
1775 return adc_init_SB055x(hw, info->input, info->mic20db);
1776 else
1777 return adc_init_SBx(hw, info->input, info->mic20db);
1778}
1779
1780static int hw_have_digit_io_switch(struct hw *hw)
1781{
1782 /* SB073x and Vista compatible cards have no digit IO switch */
1783 return !(hw->model == CTSB073X || hw->model == CTUAA);
1784}
1785
/* Pack four characters into a big-endian u32 signature word. */
#define CTLBITS(a, b, c, d)	(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

#define UAA_CFG_PWRSTATUS	0x44
#define UAA_CFG_SPACE_FLAG	0xA0
#define UAA_CORE_CHANGE	0x3FFC
/*
 * Switch a Hendrix card from UAA mode to native X-Fi mode.
 *
 * Four reads of the mode-change register identify the current mode by
 * a rotating CTLA/CTLZ/CTLL signature; if the card is not in UAA mode
 * this is a no-op.  Otherwise the current PCI config registers are
 * saved, the X-Fi config space is programmed (UAA BAR5 becomes X-Fi
 * BAR0, the rest copied over), and the "CTLX CTL- CTLF CTLi" sequence
 * is written to trigger the mode switch.
 *
 * Returns 0 on success, -ENOENT if BAR0 cannot be ioremapped.
 * NOTE(review): mem_base should arguably be void __iomem * to satisfy
 * sparse; behavior is unchanged either way.
 */
static int uaa_to_xfi(struct pci_dev *pci)
{
	unsigned int bar0, bar1, bar2, bar3, bar4, bar5;
	unsigned int cmd, irq, cl_size, l_timer, pwr;
	unsigned int is_uaa;
	unsigned int data[4] = {0};
	unsigned int io_base;
	void *mem_base;
	int i;
	const u32 CTLX = CTLBITS('C', 'T', 'L', 'X');
	const u32 CTL_ = CTLBITS('C', 'T', 'L', '-');
	const u32 CTLF = CTLBITS('C', 'T', 'L', 'F');
	const u32 CTLi = CTLBITS('C', 'T', 'L', 'i');
	const u32 CTLA = CTLBITS('C', 'T', 'L', 'A');
	const u32 CTLZ = CTLBITS('C', 'T', 'L', 'Z');
	const u32 CTLL = CTLBITS('C', 'T', 'L', 'L');

	/* By default, Hendrix card UAA Bar0 should be using memory... */
	io_base = pci_resource_start(pci, 0);
	mem_base = ioremap(io_base, pci_resource_len(pci, 0));
	if (NULL == mem_base)
		return -ENOENT;

	/* Read current mode from Mode Change Register */
	for (i = 0; i < 4; i++)
		data[i] = readl(mem_base + UAA_CORE_CHANGE);

	/* Determine current mode... */
	if (data[0] == CTLA) {
		is_uaa = ((data[1] == CTLZ && data[2] == CTLL
			&& data[3] == CTLA) || (data[1] == CTLA
			&& data[2] == CTLZ && data[3] == CTLL));
	} else if (data[0] == CTLZ) {
		is_uaa = (data[1] == CTLL
			&& data[2] == CTLA && data[3] == CTLA);
	} else if (data[0] == CTLL) {
		is_uaa = (data[1] == CTLA
			&& data[2] == CTLA && data[3] == CTLZ);
	} else {
		is_uaa = 0;
	}

	if (!is_uaa) {
		/* Not in UAA mode currently. Return directly. */
		iounmap(mem_base);
		return 0;
	}

	/* Snapshot the UAA-mode PCI configuration. */
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_0, &bar0);
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_1, &bar1);
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_2, &bar2);
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_3, &bar3);
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_4, &bar4);
	pci_read_config_dword(pci, PCI_BASE_ADDRESS_5, &bar5);
	pci_read_config_dword(pci, PCI_INTERRUPT_LINE, &irq);
	pci_read_config_dword(pci, PCI_CACHE_LINE_SIZE, &cl_size);
	pci_read_config_dword(pci, PCI_LATENCY_TIMER, &l_timer);
	pci_read_config_dword(pci, UAA_CFG_PWRSTATUS, &pwr);
	pci_read_config_dword(pci, PCI_COMMAND, &cmd);

	/* Set up X-Fi core PCI configuration space. */
	/* Switch to X-Fi config space with BAR0 exposed. */
	pci_write_config_dword(pci, UAA_CFG_SPACE_FLAG, 0x87654321);
	/* Copy UAA's BAR5 into X-Fi BAR0 */
	pci_write_config_dword(pci, PCI_BASE_ADDRESS_0, bar5);
	/* Switch to X-Fi config space without BAR0 exposed. */
	pci_write_config_dword(pci, UAA_CFG_SPACE_FLAG, 0x12345678);
	pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, bar1);
	pci_write_config_dword(pci, PCI_BASE_ADDRESS_2, bar2);
	pci_write_config_dword(pci, PCI_BASE_ADDRESS_3, bar3);
	pci_write_config_dword(pci, PCI_BASE_ADDRESS_4, bar4);
	pci_write_config_dword(pci, PCI_INTERRUPT_LINE, irq);
	pci_write_config_dword(pci, PCI_CACHE_LINE_SIZE, cl_size);
	pci_write_config_dword(pci, PCI_LATENCY_TIMER, l_timer);
	pci_write_config_dword(pci, UAA_CFG_PWRSTATUS, pwr);
	pci_write_config_dword(pci, PCI_COMMAND, cmd);

	/* Switch to X-Fi mode */
	writel(CTLX, (mem_base + UAA_CORE_CHANGE));
	writel(CTL_, (mem_base + UAA_CORE_CHANGE));
	writel(CTLF, (mem_base + UAA_CORE_CHANGE));
	writel(CTLi, (mem_base + UAA_CORE_CHANGE));

	iounmap(mem_base);

	return 0;
}
1878
/*
 * Shared interrupt handler for 20k1 cards.
 *
 * Reads the global interrupt pending register (GIP); zero means the
 * interrupt was not ours.  Pending bits are forwarded to the registered
 * callback and then written back to GIP (presumably to acknowledge
 * them).
 */
static irqreturn_t ct_20k1_interrupt(int irq, void *dev_id)
{
	struct hw *hw = dev_id;
	unsigned int status;

	status = hw_read_20kx(hw, GIP);
	if (!status)
		return IRQ_NONE;

	if (hw->irq_callback)
		hw->irq_callback(hw->irq_callback_data, status);

	hw_write_20kx(hw, GIP, status);
	return IRQ_HANDLED;
}
1894
/*
 * Bring up the PCI device: enable it, verify the DMA mask, claim the
 * I/O regions, switch a UAA-mode card to X-Fi mode, and install the
 * shared interrupt handler.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is released via the goto-cleanup chain.
 */
static int hw_card_start(struct hw *hw)
{
	int err;
	struct pci_dev *pci = hw->pci;

	err = pci_enable_device(pci);
	if (err < 0)
		return err;

	/* Set DMA transfer mask */
	if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
	    pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
		printk(KERN_ERR "architecture does not support PCI "
		"busmaster DMA with mask 0x%llx\n",
		       CT_XFI_DMA_MASK);
		err = -ENXIO;
		goto error1;
	}

	err = pci_request_regions(pci, "XFi");
	if (err < 0)
		goto error1;

	/* Switch to X-Fi mode from UAA mode if needed */
	if (hw->model == CTUAA) {
		err = uaa_to_xfi(pci);
		if (err)
			goto error2;

		/* In X-Fi mode the ports live behind BAR5, not BAR0. */
		hw->io_base = pci_resource_start(pci, 5);
	} else {
		hw->io_base = pci_resource_start(pci, 0);
	}

	err = request_irq(pci->irq, ct_20k1_interrupt, IRQF_SHARED,
			  "ctxfi", hw);
	if (err < 0) {
		printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
		goto error2;
	}
	hw->irq = pci->irq;

	pci_set_master(pci);

	return 0;

error2:
	pci_release_regions(pci);
	hw->io_base = 0;
error1:
	pci_disable_device(pci);
	return err;
}
1948
/*
 * Quiesce the card.  Currently only waits for any in-flight interrupt
 * handler to complete; the hardware itself is not disabled yet.
 */
static int hw_card_stop(struct hw *hw)
{
	/* TODO: Disable interrupt and so on... */
	if (hw->irq >= 0)
		synchronize_irq(hw->irq);
	return 0;
}
1956
1957static int hw_card_shutdown(struct hw *hw)
1958{
1959 if (hw->irq >= 0)
1960 free_irq(hw->irq, hw);
1961
1962 hw->irq = -1;
1963
1964 if (NULL != ((void *)hw->mem_base))
1965 iounmap((void *)hw->mem_base);
1966
1967 hw->mem_base = (unsigned long)NULL;
1968
1969 if (hw->io_base)
1970 pci_release_regions(hw->pci);
1971
1972 hw->io_base = 0;
1973
1974 pci_disable_device(hw->pci);
1975
1976 return 0;
1977}
1978
/*
 * Full 20k1 card initialization: PCI bring-up (if not already done),
 * PLL and auto-init, audio-ring enable, interrupt reset, model-specific
 * GPIO direction setup, and transport/DAIO/DAC/ADC sub-block init.
 *
 * Returns 0 on success or the negative error of the failing step.
 * NOTE(review): error paths after hw_card_start() return without
 * undoing it here; cleanup relies on the caller eventually invoking
 * destroy_20k1_hw_obj() -> hw_card_shutdown().
 */
static int hw_card_init(struct hw *hw, struct card_conf *info)
{
	int err;
	unsigned int gctl;
	u32 data;
	struct dac_conf dac_info = {0};
	struct adc_conf adc_info = {0};
	struct daio_conf daio_info = {0};
	struct trn_conf trn_info = {0};

	/* Get PCI io port base address and do Hendrix switch if needed. */
	if (!hw->io_base) {
		err = hw_card_start(hw);
		if (err)
			return err;
	}

	/* PLL init */
	err = hw_pll_init(hw, info->rsr);
	if (err < 0)
		return err;

	/* kick off auto-init */
	err = hw_auto_init(hw);
	if (err < 0)
		return err;

	/* Enable audio ring */
	gctl = hw_read_20kx(hw, GCTL);
	set_field(&gctl, GCTL_EAC, 1);
	set_field(&gctl, GCTL_DBP, 1);
	set_field(&gctl, GCTL_TBP, 1);
	set_field(&gctl, GCTL_FBP, 1);
	set_field(&gctl, GCTL_ET, 1);
	hw_write_20kx(hw, GCTL, gctl);
	mdelay(10);	/* let the ring settle before touching interrupts */

	/* Reset all global pending interrupts */
	hw_write_20kx(hw, GIE, 0);
	/* Reset all SRC pending interrupts */
	hw_write_20kx(hw, SRCIP, 0);
	mdelay(30);

	/* Detect the card ID and configure GPIO accordingly. */
	switch (hw->model) {
	case CTSB055X:
		hw_write_20kx(hw, GPIOCTL, 0x13fe);
		break;
	case CTSB073X:
		hw_write_20kx(hw, GPIOCTL, 0x00e6);
		break;
	case CTUAA:
		hw_write_20kx(hw, GPIOCTL, 0x00c2);
		break;
	default:
		hw_write_20kx(hw, GPIOCTL, 0x01e6);
		break;
	}

	trn_info.vm_pgt_phys = info->vm_pgt_phys;
	err = hw_trn_init(hw, &trn_info);
	if (err < 0)
		return err;

	daio_info.msr = info->msr;
	err = hw_daio_init(hw, &daio_info);
	if (err < 0)
		return err;

	dac_info.msr = info->msr;
	err = hw_dac_init(hw, &dac_info);
	if (err < 0)
		return err;

	/* Default capture setup: line-in, no mic boost. */
	adc_info.msr = info->msr;
	adc_info.input = ADC_LINEIN;
	adc_info.mic20db = 0;
	err = hw_adc_init(hw, &adc_info);
	if (err < 0)
		return err;

	data = hw_read_20kx(hw, SRCMCTL);
	data |= 0x1; /* Enables input from the audio ring */
	hw_write_20kx(hw, SRCMCTL, data);

	return 0;
}
2066
2067static u32 hw_read_20kx(struct hw *hw, u32 reg)
2068{
2069 u32 value;
2070 unsigned long flags;
2071
2072 spin_lock_irqsave(
2073 &container_of(hw, struct hw20k1, hw)->reg_20k1_lock, flags);
2074 outl(reg, hw->io_base + 0x0);
2075 value = inl(hw->io_base + 0x4);
2076 spin_unlock_irqrestore(
2077 &container_of(hw, struct hw20k1, hw)->reg_20k1_lock, flags);
2078
2079 return value;
2080}
2081
2082static void hw_write_20kx(struct hw *hw, u32 reg, u32 data)
2083{
2084 unsigned long flags;
2085
2086 spin_lock_irqsave(
2087 &container_of(hw, struct hw20k1, hw)->reg_20k1_lock, flags);
2088 outl(reg, hw->io_base + 0x0);
2089 outl(data, hw->io_base + 0x4);
2090 spin_unlock_irqrestore(
2091 &container_of(hw, struct hw20k1, hw)->reg_20k1_lock, flags);
2092
2093}
2094
2095static u32 hw_read_pci(struct hw *hw, u32 reg)
2096{
2097 u32 value;
2098 unsigned long flags;
2099
2100 spin_lock_irqsave(
2101 &container_of(hw, struct hw20k1, hw)->reg_pci_lock, flags);
2102 outl(reg, hw->io_base + 0x10);
2103 value = inl(hw->io_base + 0x14);
2104 spin_unlock_irqrestore(
2105 &container_of(hw, struct hw20k1, hw)->reg_pci_lock, flags);
2106
2107 return value;
2108}
2109
2110static void hw_write_pci(struct hw *hw, u32 reg, u32 data)
2111{
2112 unsigned long flags;
2113
2114 spin_lock_irqsave(
2115 &container_of(hw, struct hw20k1, hw)->reg_pci_lock, flags);
2116 outl(reg, hw->io_base + 0x10);
2117 outl(data, hw->io_base + 0x14);
2118 spin_unlock_irqrestore(
2119 &container_of(hw, struct hw20k1, hw)->reg_pci_lock, flags);
2120}
2121
/*
 * Method table for 20k1-based cards.  create_20k1_hw_obj() copies this
 * preset into each newly allocated struct hw20k1.
 */
static struct hw ct20k1_preset __devinitdata = {
	.irq = -1,

	/* Card lifecycle and global controls. */
	.card_init = hw_card_init,
	.card_stop = hw_card_stop,
	.pll_init = hw_pll_init,
	.is_adc_source_selected = hw_is_adc_input_selected,
	.select_adc_source = hw_adc_input_select,
	.have_digit_io_switch = hw_have_digit_io_switch,

	/* SRC (sample rate converter) resource operations. */
	.src_rsc_get_ctrl_blk = src_get_rsc_ctrl_blk,
	.src_rsc_put_ctrl_blk = src_put_rsc_ctrl_blk,
	.src_mgr_get_ctrl_blk = src_mgr_get_ctrl_blk,
	.src_mgr_put_ctrl_blk = src_mgr_put_ctrl_blk,
	.src_set_state = src_set_state,
	.src_set_bm = src_set_bm,
	.src_set_rsr = src_set_rsr,
	.src_set_sf = src_set_sf,
	.src_set_wr = src_set_wr,
	.src_set_pm = src_set_pm,
	.src_set_rom = src_set_rom,
	.src_set_vo = src_set_vo,
	.src_set_st = src_set_st,
	.src_set_ie = src_set_ie,
	.src_set_ilsz = src_set_ilsz,
	.src_set_bp = src_set_bp,
	.src_set_cisz = src_set_cisz,
	.src_set_ca = src_set_ca,
	.src_set_sa = src_set_sa,
	.src_set_la = src_set_la,
	.src_set_pitch = src_set_pitch,
	.src_set_dirty = src_set_dirty,
	.src_set_clear_zbufs = src_set_clear_zbufs,
	.src_set_dirty_all = src_set_dirty_all,
	.src_commit_write = src_commit_write,
	.src_get_ca = src_get_ca,
	.src_get_dirty = src_get_dirty,
	.src_dirty_conj_mask = src_dirty_conj_mask,
	.src_mgr_enbs_src = src_mgr_enbs_src,
	.src_mgr_enb_src = src_mgr_enb_src,
	.src_mgr_dsb_src = src_mgr_dsb_src,
	.src_mgr_commit_write = src_mgr_commit_write,

	/* SRC input mapper operations. */
	.srcimp_mgr_get_ctrl_blk = srcimp_mgr_get_ctrl_blk,
	.srcimp_mgr_put_ctrl_blk = srcimp_mgr_put_ctrl_blk,
	.srcimp_mgr_set_imaparc = srcimp_mgr_set_imaparc,
	.srcimp_mgr_set_imapuser = srcimp_mgr_set_imapuser,
	.srcimp_mgr_set_imapnxt = srcimp_mgr_set_imapnxt,
	.srcimp_mgr_set_imapaddr = srcimp_mgr_set_imapaddr,
	.srcimp_mgr_commit_write = srcimp_mgr_commit_write,

	/* AMIXER (audio mixer) operations. */
	.amixer_rsc_get_ctrl_blk = amixer_rsc_get_ctrl_blk,
	.amixer_rsc_put_ctrl_blk = amixer_rsc_put_ctrl_blk,
	.amixer_mgr_get_ctrl_blk = amixer_mgr_get_ctrl_blk,
	.amixer_mgr_put_ctrl_blk = amixer_mgr_put_ctrl_blk,
	.amixer_set_mode = amixer_set_mode,
	.amixer_set_iv = amixer_set_iv,
	.amixer_set_x = amixer_set_x,
	.amixer_set_y = amixer_set_y,
	.amixer_set_sadr = amixer_set_sadr,
	.amixer_set_se = amixer_set_se,
	.amixer_set_dirty = amixer_set_dirty,
	.amixer_set_dirty_all = amixer_set_dirty_all,
	.amixer_commit_write = amixer_commit_write,
	.amixer_get_y = amixer_get_y,
	.amixer_get_dirty = amixer_get_dirty,

	/* Digital audio I/O operations. */
	.dai_get_ctrl_blk = dai_get_ctrl_blk,
	.dai_put_ctrl_blk = dai_put_ctrl_blk,
	/*
	 * NOTE(review): srco is wired to dai_srt_set_srcr and srcm to
	 * dai_srt_set_srcl - the o/m vs r/l naming mismatch looks
	 * intentional, but is worth confirming against cthardware.h.
	 */
	.dai_srt_set_srco = dai_srt_set_srcr,
	.dai_srt_set_srcm = dai_srt_set_srcl,
	.dai_srt_set_rsr = dai_srt_set_rsr,
	.dai_srt_set_drat = dai_srt_set_drat,
	.dai_srt_set_ec = dai_srt_set_ec,
	.dai_srt_set_et = dai_srt_set_et,
	.dai_commit_write = dai_commit_write,

	.dao_get_ctrl_blk = dao_get_ctrl_blk,
	.dao_put_ctrl_blk = dao_put_ctrl_blk,
	.dao_set_spos = dao_set_spos,
	.dao_commit_write = dao_commit_write,
	.dao_get_spos = dao_get_spos,

	.daio_mgr_get_ctrl_blk = daio_mgr_get_ctrl_blk,
	.daio_mgr_put_ctrl_blk = daio_mgr_put_ctrl_blk,
	.daio_mgr_enb_dai = daio_mgr_enb_dai,
	.daio_mgr_dsb_dai = daio_mgr_dsb_dai,
	.daio_mgr_enb_dao = daio_mgr_enb_dao,
	.daio_mgr_dsb_dao = daio_mgr_dsb_dao,
	.daio_mgr_dao_init = daio_mgr_dao_init,
	.daio_mgr_set_imaparc = daio_mgr_set_imaparc,
	.daio_mgr_set_imapnxt = daio_mgr_set_imapnxt,
	.daio_mgr_set_imapaddr = daio_mgr_set_imapaddr,
	.daio_mgr_commit_write = daio_mgr_commit_write,

	/* Timer / wall-clock helpers. */
	.set_timer_irq = set_timer_irq,
	.set_timer_tick = set_timer_tick,
	.get_wc = get_wc,
};
2221
2222int __devinit create_20k1_hw_obj(struct hw **rhw)
2223{
2224 struct hw20k1 *hw20k1;
2225
2226 *rhw = NULL;
2227 hw20k1 = kzalloc(sizeof(*hw20k1), GFP_KERNEL);
2228 if (NULL == hw20k1)
2229 return -ENOMEM;
2230
2231 spin_lock_init(&hw20k1->reg_20k1_lock);
2232 spin_lock_init(&hw20k1->reg_pci_lock);
2233
2234 hw20k1->hw = ct20k1_preset;
2235
2236 *rhw = &hw20k1->hw;
2237
2238 return 0;
2239}
2240
2241int destroy_20k1_hw_obj(struct hw *hw)
2242{
2243 if (hw->io_base)
2244 hw_card_shutdown(hw);
2245
2246 kfree(container_of(hw, struct hw20k1, hw));
2247 return 0;
2248}
diff --git a/sound/pci/ctxfi/cthw20k1.h b/sound/pci/ctxfi/cthw20k1.h
new file mode 100644
index 000000000000..02f72fb448a6
--- /dev/null
+++ b/sound/pci/ctxfi/cthw20k1.h
@@ -0,0 +1,26 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthw20k1.h
9 *
10 * @Brief
11 * This file contains the definition of the hardware access method.
12 *
13 * @Author Liu Chun
14 * @Date May 13 2008
15 *
16 */
17
18#ifndef CTHW20K1_H
19#define CTHW20K1_H
20
21#include "cthardware.h"
22
23int create_20k1_hw_obj(struct hw **rhw);
24int destroy_20k1_hw_obj(struct hw *hw);
25
26#endif /* CTHW20K1_H */
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
new file mode 100644
index 000000000000..4493a51c6b01
--- /dev/null
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -0,0 +1,2137 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthw20k2.c
9 *
10 * @Brief
11 * This file contains the implementation of the hardware access method for 20k2.
12 *
13 * @Author Liu Chun
14 * @Date May 14 2008
15 *
16 */
17
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/pci.h>
21#include <linux/io.h>
22#include <linux/string.h>
23#include <linux/kernel.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include "cthw20k2.h"
27#include "ct20k2reg.h"
28
/* PTE width (and thus the DMA mask) follows the kernel's word size. */
#if BITS_PER_LONG == 32
#define CT_XFI_DMA_MASK		DMA_BIT_MASK(32) /* 32 bit PTE */
#else
#define CT_XFI_DMA_MASK		DMA_BIT_MASK(64) /* 64 bit PTE */
#endif
34
/*
 * Per-card state for 20k2 chips: the generic struct hw plus parameters
 * of the currently addressed i2c device.
 */
struct hw20k2 {
	struct hw hw;
	/* for i2c */
	unsigned char dev_id;		/* i2c device id */
	unsigned char addr_size;	/* address width -- presumably bytes; confirm */
	unsigned char data_size;	/* data width -- presumably bytes; confirm */
};
42
43static u32 hw_read_20kx(struct hw *hw, u32 reg);
44static void hw_write_20kx(struct hw *hw, u32 reg, u32 data);
45
46/*
47 * Type definition block.
48 * The layout of control structures can be directly applied on 20k2 chip.
49 */
50
51/*
52 * SRC control block definitions.
53 */
54
/* SRC resource control block */
/*
 * Bit-field masks within the per-SRC registers.  The role of each field
 * is implied by the matching src_set_*() setter defined further below.
 */
#define SRCCTL_STATE	0x00000007
#define SRCCTL_BM	0x00000008
#define SRCCTL_RSR	0x00000030
#define SRCCTL_SF	0x000001C0
#define SRCCTL_WR	0x00000200
#define SRCCTL_PM	0x00000400
#define SRCCTL_ROM	0x00001800
#define SRCCTL_VO	0x00002000
#define SRCCTL_ST	0x00004000
#define SRCCTL_IE	0x00008000
#define SRCCTL_ILSZ	0x000F0000
#define SRCCTL_BP	0x00100000

#define SRCCCR_CISZ	0x000007FF
#define SRCCCR_CWA	0x001FF800
#define SRCCCR_D	0x00200000
#define SRCCCR_RS	0x01C00000
#define SRCCCR_NAL	0x3E000000
#define SRCCCR_RA	0xC0000000

#define SRCCA_CA	0x0FFFFFFF
#define SRCCA_RS	0xE0000000

#define SRCSA_SA	0x0FFFFFFF

#define SRCLA_LA	0x0FFFFFFF

/* Mixer Parameter Ring ram Low and High register.
 * Fixed-point value in 8.24 format for parameter channel */
#define MPRLH_PITCH	0xFFFFFFFF
86
/* SRC resource register dirty flags */
union src_dirty {
	struct {
		u16 ctl:1;
		u16 ccr:1;
		u16 sa:1;
		u16 la:1;
		u16 ca:1;
		u16 mpr:1;
		u16 czbfs:1;	/* Clear Z-Buffers */
		u16 rsv:9;
	} bf;
	u16 data;	/* whole-word view for bulk set/get of the flags */
};

/*
 * Software image of one SRC resource's registers; the dirty flags
 * record which words src_commit_write() must flush to hardware.
 */
struct src_rsc_ctrl_blk {
	unsigned int ctl;
	unsigned int ccr;
	unsigned int ca;
	unsigned int sa;
	unsigned int la;
	unsigned int mpr;
	union src_dirty dirty;
};
111
/* SRC manager control block */
union src_mgr_dirty {
	struct {
		u16 enb0:1;	/* one flag per SRC_ENB enable word */
		u16 enb1:1;
		u16 enb2:1;
		u16 enb3:1;
		u16 enb4:1;
		u16 enb5:1;
		u16 enb6:1;
		u16 enb7:1;
		u16 enbsa:1;	/* SRC_ENBSA word */
		u16 rsv:7;
	} bf;
	u16 data;	/* whole-word view for bulk set/get of the flags */
};

/* Cached SRC-manager enable state; flushed by src_mgr_commit_write(). */
struct src_mgr_ctrl_blk {
	unsigned int enbsa;
	unsigned int enb[8];
	union src_mgr_dirty dirty;
};
134
/* SRCIMP manager control block */
/* Fields of one input-map entry; see the srcimp_mgr_set_imap*() setters. */
#define SRCAIM_ARC	0x00000FFF
#define SRCAIM_NXT	0x00FF0000
#define SRCAIM_SRC	0xFF000000

/* One SRC input-map entry plus the SRC_IMAP index it belongs to. */
struct srcimap {
	unsigned int srcaim;
	unsigned int idx;
};

/* SRCIMP manager register dirty flags */
union srcimp_mgr_dirty {
	struct {
		u16 srcimap:1;
		u16 rsv:15;
	} bf;
	u16 data;
};

/* Cached input-map state; flushed by srcimp_mgr_commit_write(). */
struct srcimp_mgr_ctrl_blk {
	struct srcimap srcimap;
	union srcimp_mgr_dirty dirty;
};
158
159/*
160 * Function implementation block.
161 */
162
163static int src_get_rsc_ctrl_blk(void **rblk)
164{
165 struct src_rsc_ctrl_blk *blk;
166
167 *rblk = NULL;
168 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
169 if (NULL == blk)
170 return -ENOMEM;
171
172 *rblk = blk;
173
174 return 0;
175}
176
/* Free an SRC resource control block; always succeeds. */
static int src_put_rsc_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
183
/*
 * SRCCTL field setters.
 *
 * Each src_set_*() below updates one field of the cached SRCCTL word in
 * the control block via set_field() and marks the ctl word dirty;
 * nothing touches hardware until src_commit_write() flushes the block.
 * All of them return 0.
 */
static int src_set_state(void *blk, unsigned int state)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_STATE, state);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_bm(void *blk, unsigned int bm)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_BM, bm);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_rsr(void *blk, unsigned int rsr)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_RSR, rsr);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_sf(void *blk, unsigned int sf)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_SF, sf);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_wr(void *blk, unsigned int wr)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_WR, wr);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_pm(void *blk, unsigned int pm)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_PM, pm);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_rom(void *blk, unsigned int rom)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_ROM, rom);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_vo(void *blk, unsigned int vo)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_VO, vo);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_st(void *blk, unsigned int st)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_ST, st);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_ie(void *blk, unsigned int ie)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_IE, ie);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_ilsz(void *blk, unsigned int ilsz)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_ILSZ, ilsz);
	ctl->dirty.bf.ctl = 1;
	return 0;
}

static int src_set_bp(void *blk, unsigned int bp)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ctl, SRCCTL_BP, bp);
	ctl->dirty.bf.ctl = 1;
	return 0;
}
291
/*
 * Setters for the remaining cached SRC words (ccr/ca/sa/la/mpr).  Each
 * updates its word via set_field() and marks the corresponding dirty
 * bit; src_commit_write() performs the actual hardware writes.
 */
static int src_set_cisz(void *blk, unsigned int cisz)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ccr, SRCCCR_CISZ, cisz);
	ctl->dirty.bf.ccr = 1;
	return 0;
}

static int src_set_ca(void *blk, unsigned int ca)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->ca, SRCCA_CA, ca);
	ctl->dirty.bf.ca = 1;
	return 0;
}

static int src_set_sa(void *blk, unsigned int sa)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->sa, SRCSA_SA, sa);
	ctl->dirty.bf.sa = 1;
	return 0;
}

static int src_set_la(void *blk, unsigned int la)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->la, SRCLA_LA, la);
	ctl->dirty.bf.la = 1;
	return 0;
}

/* Pitch is an 8.24 fixed-point value occupying the whole mpr word. */
static int src_set_pitch(void *blk, unsigned int pitch)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	set_field(&ctl->mpr, MPRLH_PITCH, pitch);
	ctl->dirty.bf.mpr = 1;
	return 0;
}
336
/* Request (or cancel) a Z-buffer clear on the next commit. */
static int src_set_clear_zbufs(void *blk, unsigned int clear)
{
	((struct src_rsc_ctrl_blk *)blk)->dirty.bf.czbfs = (clear ? 1 : 0);
	return 0;
}

/* Overwrite the dirty-flag word with the low 16 bits of @flags. */
static int src_set_dirty(void *blk, unsigned int flags)
{
	((struct src_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
	return 0;
}

/* Mark every SRC register dirty so the next commit rewrites them all. */
static int src_set_dirty_all(void *blk)
{
	((struct src_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
	return 0;
}
354
#define AR_SLOT_SIZE		4096
#define AR_SLOT_BLOCK_SIZE	16
#define AR_PTS_PITCH		6
#define AR_PARAM_SRC_OFFSET	0x60

/*
 * Map a SRC index to the audio-ring slot holding its pitch parameter:
 * each SRC owns a block of AR_SLOT_BLOCK_SIZE entries, the pitch sits
 * at offset AR_PTS_PITCH inside that block, and the whole thing is
 * rotated back by the SRC parameter base, modulo the ring size.
 */
static unsigned int src_param_pitch_mixer(unsigned int src_idx)
{
	unsigned int slot = src_idx * AR_SLOT_BLOCK_SIZE + AR_PTS_PITCH;

	return (slot + AR_SLOT_SIZE - AR_PARAM_SRC_OFFSET) % AR_SLOT_SIZE;
}
366
/*
 * Flush all dirty state of SRC resource @idx to hardware.  Registers
 * are written in a fixed order (Z-buffers, pitch mixer, sa/la/ca, the
 * always-written SRC_CF, then ccr and ctl), clearing each dirty bit as
 * it goes.  Always returns 0.
 */
static int src_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct src_rsc_ctrl_blk *ctl = blk;
	int i;

	if (ctl->dirty.bf.czbfs) {
		/* Clear Z-Buffer registers */
		for (i = 0; i < 8; i++)
			hw_write_20kx(hw, SRC_UPZ+idx*0x100+i*0x4, 0);

		for (i = 0; i < 4; i++)
			hw_write_20kx(hw, SRC_DN0Z+idx*0x100+i*0x4, 0);

		for (i = 0; i < 8; i++)
			hw_write_20kx(hw, SRC_DN1Z+idx*0x100+i*0x4, 0);

		ctl->dirty.bf.czbfs = 0;
	}
	if (ctl->dirty.bf.mpr) {
		/* Take the parameter mixer resource in the same group as that
		 * the idx src is in for simplicity. Unlike src, all conjugate
		 * parameter mixer resources must be programmed for
		 * corresponding conjugate src resources. */
		unsigned int pm_idx = src_param_pitch_mixer(idx);
		hw_write_20kx(hw, MIXER_PRING_LO_HI+4*pm_idx, ctl->mpr);
		hw_write_20kx(hw, MIXER_PMOPLO+8*pm_idx, 0x3);
		hw_write_20kx(hw, MIXER_PMOPHI+8*pm_idx, 0x0);
		ctl->dirty.bf.mpr = 0;
	}
	if (ctl->dirty.bf.sa) {
		hw_write_20kx(hw, SRC_SA+idx*0x100, ctl->sa);
		ctl->dirty.bf.sa = 0;
	}
	if (ctl->dirty.bf.la) {
		hw_write_20kx(hw, SRC_LA+idx*0x100, ctl->la);
		ctl->dirty.bf.la = 0;
	}
	if (ctl->dirty.bf.ca) {
		hw_write_20kx(hw, SRC_CA+idx*0x100, ctl->ca);
		ctl->dirty.bf.ca = 0;
	}

	/* Write srccf register */
	hw_write_20kx(hw, SRC_CF+idx*0x100, 0x0);

	if (ctl->dirty.bf.ccr) {
		hw_write_20kx(hw, SRC_CCR+idx*0x100, ctl->ccr);
		ctl->dirty.bf.ccr = 0;
	}
	if (ctl->dirty.bf.ctl) {
		hw_write_20kx(hw, SRC_CTL+idx*0x100, ctl->ctl);
		ctl->dirty.bf.ctl = 0;
	}

	return 0;
}
423
/*
 * Read the SRC current-address register of resource @idx from hardware,
 * cache it (clearing its dirty flag), and return the CA field.
 */
static int src_get_ca(struct hw *hw, unsigned int idx, void *blk)
{
	struct src_rsc_ctrl_blk *ctl = blk;

	ctl->ca = hw_read_20kx(hw, SRC_CA+idx*0x100);
	ctl->dirty.bf.ca = 0;

	return get_field(ctl->ca, SRCCA_CA);
}
433
434static unsigned int src_get_dirty(void *blk)
435{
436 return ((struct src_rsc_ctrl_blk *)blk)->dirty.data;
437}
438
/*
 * Dirty-flag mask of the registers that must also be programmed on a
 * conjugate SRC resource: bit 5, the mpr flag in union src_dirty.
 */
static unsigned int src_dirty_conj_mask(void)
{
	return 0x1 << 5;
}
443
/*
 * Enable SRC @idx and its sibling group: sets the matching bit in the
 * SRC_ENBSA image (one bit per group of 4 -- presumably within a bank
 * of 128; confirm against chip docs) plus the per-SRC enable bit.
 */
static int src_mgr_enbs_src(void *blk, unsigned int idx)
{
	((struct src_mgr_ctrl_blk *)blk)->enbsa |= (0x1 << ((idx%128)/4));
	((struct src_mgr_ctrl_blk *)blk)->dirty.bf.enbsa = 1;
	((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
	return 0;
}

/* Enable SRC @idx: set its bit and mark that 32-wide word dirty. */
static int src_mgr_enb_src(void *blk, unsigned int idx)
{
	((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
	((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
	return 0;
}

/* Disable SRC @idx: clear its bit and mark that 32-wide word dirty. */
static int src_mgr_dsb_src(void *blk, unsigned int idx)
{
	((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] &= ~(0x1 << (idx%32));
	((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
	return 0;
}
465
/*
 * Flush pending SRC-manager state: wait for SRC_ENBSTAT bit 0 to clear
 * before writing SRC_ENBSA, then write each dirty 32-wide enable word
 * to SRC_ENB + i*0x100.  Always returns 0.
 * NOTE(review): the ENBSTAT poll is unbounded.
 */
static int src_mgr_commit_write(struct hw *hw, void *blk)
{
	struct src_mgr_ctrl_blk *ctl = blk;
	int i;
	unsigned int ret;

	if (ctl->dirty.bf.enbsa) {
		do {
			ret = hw_read_20kx(hw, SRC_ENBSTAT);
		} while (ret & 0x1);
		hw_write_20kx(hw, SRC_ENBSA, ctl->enbsa);
		ctl->dirty.bf.enbsa = 0;
	}
	for (i = 0; i < 8; i++) {
		if ((ctl->dirty.data & (0x1 << i))) {
			hw_write_20kx(hw, SRC_ENB+(i*0x100), ctl->enb[i]);
			ctl->dirty.data &= ~(0x1 << i);
		}
	}

	return 0;
}
488
489static int src_mgr_get_ctrl_blk(void **rblk)
490{
491 struct src_mgr_ctrl_blk *blk;
492
493 *rblk = NULL;
494 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
495 if (NULL == blk)
496 return -ENOMEM;
497
498 *rblk = blk;
499
500 return 0;
501}
502
/* Free an SRC-manager control block; always succeeds. */
static int src_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
509
510static int srcimp_mgr_get_ctrl_blk(void **rblk)
511{
512 struct srcimp_mgr_ctrl_blk *blk;
513
514 *rblk = NULL;
515 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
516 if (NULL == blk)
517 return -ENOMEM;
518
519 *rblk = blk;
520
521 return 0;
522}
523
/* Release a control block obtained from srcimp_mgr_get_ctrl_blk(). */
static int srcimp_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
530
531static int srcimp_mgr_set_imaparc(void *blk, unsigned int slot)
532{
533 struct srcimp_mgr_ctrl_blk *ctl = blk;
534
535 set_field(&ctl->srcimap.srcaim, SRCAIM_ARC, slot);
536 ctl->dirty.bf.srcimap = 1;
537 return 0;
538}
539
540static int srcimp_mgr_set_imapuser(void *blk, unsigned int user)
541{
542 struct srcimp_mgr_ctrl_blk *ctl = blk;
543
544 set_field(&ctl->srcimap.srcaim, SRCAIM_SRC, user);
545 ctl->dirty.bf.srcimap = 1;
546 return 0;
547}
548
549static int srcimp_mgr_set_imapnxt(void *blk, unsigned int next)
550{
551 struct srcimp_mgr_ctrl_blk *ctl = blk;
552
553 set_field(&ctl->srcimap.srcaim, SRCAIM_NXT, next);
554 ctl->dirty.bf.srcimap = 1;
555 return 0;
556}
557
558static int srcimp_mgr_set_imapaddr(void *blk, unsigned int addr)
559{
560 ((struct srcimp_mgr_ctrl_blk *)blk)->srcimap.idx = addr;
561 ((struct srcimp_mgr_ctrl_blk *)blk)->dirty.bf.srcimap = 1;
562 return 0;
563}
564
/*
 * Flush a pending SRC input-map entry: write the cached map word to
 * SRC_IMAP at the staged index (register stride 0x100) and clear the
 * dirty flag.  Always returns 0.
 */
static int srcimp_mgr_commit_write(struct hw *hw, void *blk)
{
	struct srcimp_mgr_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.srcimap) {
		hw_write_20kx(hw, SRC_IMAP+ctl->srcimap.idx*0x100,
			      ctl->srcimap.srcaim);
		ctl->dirty.bf.srcimap = 0;
	}

	return 0;
}
577
/*
 * AMIXER control block definitions.
 */

/* AMOPLO (mixer op, low word) bit fields; staged via amixer_set_*() */
#define AMOPLO_M	0x00000003	/* mode (amixer_set_mode) */
#define AMOPLO_IV	0x00000004	/* iv flag (amixer_set_iv) */
#define AMOPLO_X	0x0003FFF0	/* x operand (amixer_set_x) */
#define AMOPLO_Y	0xFFFC0000	/* y operand (amixer_set_y/amixer_get_y) */

/* AMOPHI (mixer op, high word) bit fields */
#define AMOPHI_SADR	0x000000FF	/* set via amixer_set_sadr */
#define AMOPHI_SE	0x80000000	/* set via amixer_set_se */

/* AMIXER resource register dirty flags */
union amixer_dirty {
	struct {
		u16 amoplo:1;	/* AMOPLO image needs write-back */
		u16 amophi:1;	/* AMOPHI image needs write-back */
		u16 rsv:14;
	} bf;
	u16 data;	/* all flags as a single word */
};

/* AMIXER resource control block: cached register images + dirty flags */
struct amixer_rsc_ctrl_blk {
	unsigned int amoplo;	/* cached MIXER_AMOPLO image */
	unsigned int amophi;	/* cached MIXER_AMOPHI image */
	union amixer_dirty dirty;
};
606
607static int amixer_set_mode(void *blk, unsigned int mode)
608{
609 struct amixer_rsc_ctrl_blk *ctl = blk;
610
611 set_field(&ctl->amoplo, AMOPLO_M, mode);
612 ctl->dirty.bf.amoplo = 1;
613 return 0;
614}
615
616static int amixer_set_iv(void *blk, unsigned int iv)
617{
618 struct amixer_rsc_ctrl_blk *ctl = blk;
619
620 set_field(&ctl->amoplo, AMOPLO_IV, iv);
621 ctl->dirty.bf.amoplo = 1;
622 return 0;
623}
624
625static int amixer_set_x(void *blk, unsigned int x)
626{
627 struct amixer_rsc_ctrl_blk *ctl = blk;
628
629 set_field(&ctl->amoplo, AMOPLO_X, x);
630 ctl->dirty.bf.amoplo = 1;
631 return 0;
632}
633
634static int amixer_set_y(void *blk, unsigned int y)
635{
636 struct amixer_rsc_ctrl_blk *ctl = blk;
637
638 set_field(&ctl->amoplo, AMOPLO_Y, y);
639 ctl->dirty.bf.amoplo = 1;
640 return 0;
641}
642
643static int amixer_set_sadr(void *blk, unsigned int sadr)
644{
645 struct amixer_rsc_ctrl_blk *ctl = blk;
646
647 set_field(&ctl->amophi, AMOPHI_SADR, sadr);
648 ctl->dirty.bf.amophi = 1;
649 return 0;
650}
651
652static int amixer_set_se(void *blk, unsigned int se)
653{
654 struct amixer_rsc_ctrl_blk *ctl = blk;
655
656 set_field(&ctl->amophi, AMOPHI_SE, se);
657 ctl->dirty.bf.amophi = 1;
658 return 0;
659}
660
661static int amixer_set_dirty(void *blk, unsigned int flags)
662{
663 ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
664 return 0;
665}
666
667static int amixer_set_dirty_all(void *blk)
668{
669 ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
670 return 0;
671}
672
/*
 * Write the AMOP register pair of mixer 'idx' to the hardware.
 * If either half is dirty, both AMOPLO and AMOPHI are written (the
 * pair is laid out with stride 8) and both dirty bits are cleared.
 * Always returns 0.
 */
static int amixer_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct amixer_rsc_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.amoplo || ctl->dirty.bf.amophi) {
		hw_write_20kx(hw, MIXER_AMOPLO+idx*8, ctl->amoplo);
		ctl->dirty.bf.amoplo = 0;
		hw_write_20kx(hw, MIXER_AMOPHI+idx*8, ctl->amophi);
		ctl->dirty.bf.amophi = 0;
	}

	return 0;
}
686
687static int amixer_get_y(void *blk)
688{
689 struct amixer_rsc_ctrl_blk *ctl = blk;
690
691 return get_field(ctl->amoplo, AMOPLO_Y);
692}
693
694static unsigned int amixer_get_dirty(void *blk)
695{
696 return ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data;
697}
698
699static int amixer_rsc_get_ctrl_blk(void **rblk)
700{
701 struct amixer_rsc_ctrl_blk *blk;
702
703 *rblk = NULL;
704 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
705 if (NULL == blk)
706 return -ENOMEM;
707
708 *rblk = blk;
709
710 return 0;
711}
712
/* Release a control block obtained from amixer_rsc_get_ctrl_blk(). */
static int amixer_rsc_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
719
720static int amixer_mgr_get_ctrl_blk(void **rblk)
721{
722 *rblk = NULL;
723
724 return 0;
725}
726
/* Nothing to release: amixer_mgr_get_ctrl_blk() allocates nothing. */
static int amixer_mgr_put_ctrl_blk(void *blk)
{
	return 0;
}
731
/*
 * DAIO control block definitions.
 */

/* Receiver Sample Rate Tracker Control register (set via dai_srt_set_*) */
#define SRTCTL_SRCO	0x000000FF
#define SRTCTL_SRCM	0x0000FF00
#define SRTCTL_RSR	0x00030000
#define SRTCTL_DRAT	0x00300000
#define SRTCTL_EC	0x01000000
#define SRTCTL_ET	0x10000000

/* DAIO Receiver register dirty flags */
union dai_dirty {
	struct {
		u16 srt:1;	/* SRT control word needs write-back */
		u16 rsv:15;
	} bf;
	u16 data;
};

/* DAIO Receiver control block: cached SRT register image + dirty flag */
struct dai_ctrl_blk {
	unsigned int srt;	/* AUDIO_IO_RX_SRT_CTL image */
	union dai_dirty dirty;
};

/* Audio Input Mapper RAM entry fields */
#define AIM_ARC		0x00000FFF
#define AIM_NXT		0x007F0000

/* One staged input-map entry: map word plus the RAM index to write */
struct daoimap {
	unsigned int aim;
	unsigned int idx;
};

/* Audio Transmitter Control and Status register */
#define ATXCTL_EN	0x00000001
#define ATXCTL_MODE	0x00000010
#define ATXCTL_CD	0x00000020
#define ATXCTL_RAW	0x00000100
#define ATXCTL_MT	0x00000200
#define ATXCTL_NUC	0x00003000
#define ATXCTL_BEN	0x00010000
#define ATXCTL_BMUX	0x00700000
#define ATXCTL_B24	0x01000000
#define ATXCTL_CPF	0x02000000
#define ATXCTL_RIV	0x10000000
#define ATXCTL_LIV	0x20000000
#define ATXCTL_RSAT	0x40000000
#define ATXCTL_LSAT	0x80000000

/* XDIF Transmitter register dirty flags */
union dao_dirty {
	struct {
		u16 atxcsl:1;	/* channel-status low word pending */
		u16 rsv:15;
	} bf;
	u16 data;
};

/* XDIF Transmitter control block */
struct dao_ctrl_blk {
	/* XDIF Transmitter Channel Status Low Register */
	unsigned int atxcsl;
	union dao_dirty dirty;
};

/* Audio Receiver Control register */
#define ARXCTL_EN	0x00000001

/* DAIO manager register dirty flags (one bit per channel for tx/rx) */
union daio_mgr_dirty {
	struct {
		u32 atxctl:8;	/* per-transmitter control word pending */
		u32 arxctl:8;	/* per-receiver control word pending */
		u32 daoimap:1;	/* input-map entry pending */
		u32 rsv:15;
	} bf;
	u32 data;
};

/* DAIO manager control block: cached tx/rx control words + map entry */
struct daio_mgr_ctrl_blk {
	struct daoimap daoimap;
	unsigned int txctl[8];	/* AUDIO_IO_TX_CTL images */
	unsigned int rxctl[8];	/* AUDIO_IO_RX_CTL images */
	union daio_mgr_dirty dirty;
};
821
822static int dai_srt_set_srco(void *blk, unsigned int src)
823{
824 struct dai_ctrl_blk *ctl = blk;
825
826 set_field(&ctl->srt, SRTCTL_SRCO, src);
827 ctl->dirty.bf.srt = 1;
828 return 0;
829}
830
831static int dai_srt_set_srcm(void *blk, unsigned int src)
832{
833 struct dai_ctrl_blk *ctl = blk;
834
835 set_field(&ctl->srt, SRTCTL_SRCM, src);
836 ctl->dirty.bf.srt = 1;
837 return 0;
838}
839
840static int dai_srt_set_rsr(void *blk, unsigned int rsr)
841{
842 struct dai_ctrl_blk *ctl = blk;
843
844 set_field(&ctl->srt, SRTCTL_RSR, rsr);
845 ctl->dirty.bf.srt = 1;
846 return 0;
847}
848
849static int dai_srt_set_drat(void *blk, unsigned int drat)
850{
851 struct dai_ctrl_blk *ctl = blk;
852
853 set_field(&ctl->srt, SRTCTL_DRAT, drat);
854 ctl->dirty.bf.srt = 1;
855 return 0;
856}
857
858static int dai_srt_set_ec(void *blk, unsigned int ec)
859{
860 struct dai_ctrl_blk *ctl = blk;
861
862 set_field(&ctl->srt, SRTCTL_EC, ec ? 1 : 0);
863 ctl->dirty.bf.srt = 1;
864 return 0;
865}
866
867static int dai_srt_set_et(void *blk, unsigned int et)
868{
869 struct dai_ctrl_blk *ctl = blk;
870
871 set_field(&ctl->srt, SRTCTL_ET, et ? 1 : 0);
872 ctl->dirty.bf.srt = 1;
873 return 0;
874}
875
/*
 * Flush the cached sample-rate-tracker control word of DAI 'idx'
 * (register stride 0x40) if it has been modified.  Always returns 0.
 */
static int dai_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct dai_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.srt) {
		hw_write_20kx(hw, AUDIO_IO_RX_SRT_CTL+0x40*idx, ctl->srt);
		ctl->dirty.bf.srt = 0;
	}

	return 0;
}
887
888static int dai_get_ctrl_blk(void **rblk)
889{
890 struct dai_ctrl_blk *blk;
891
892 *rblk = NULL;
893 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
894 if (NULL == blk)
895 return -ENOMEM;
896
897 *rblk = blk;
898
899 return 0;
900}
901
/* Release a control block obtained from dai_get_ctrl_blk(). */
static int dai_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
908
909static int dao_set_spos(void *blk, unsigned int spos)
910{
911 ((struct dao_ctrl_blk *)blk)->atxcsl = spos;
912 ((struct dao_ctrl_blk *)blk)->dirty.bf.atxcsl = 1;
913 return 0;
914}
915
/*
 * Flush the cached channel-status low word of DAO 'idx'.
 * Only the first four DAOs are S/PDIF and have a SPOS register; for
 * idx >= 4 nothing is written.  NOTE(review): the dirty flag is
 * cleared even in that case, so a value staged for an I2S DAO is
 * silently dropped - presumably intentional, confirm.
 */
static int dao_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
	struct dao_ctrl_blk *ctl = blk;

	if (ctl->dirty.bf.atxcsl) {
		if (idx < 4) {
			/* S/PDIF SPOSx */
			hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_L+0x40*idx,
				      ctl->atxcsl);
		}
		ctl->dirty.bf.atxcsl = 0;
	}

	return 0;
}
931
932static int dao_get_spos(void *blk, unsigned int *spos)
933{
934 *spos = ((struct dao_ctrl_blk *)blk)->atxcsl;
935 return 0;
936}
937
938static int dao_get_ctrl_blk(void **rblk)
939{
940 struct dao_ctrl_blk *blk;
941
942 *rblk = NULL;
943 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
944 if (NULL == blk)
945 return -ENOMEM;
946
947 *rblk = blk;
948
949 return 0;
950}
951
/* Release a control block obtained from dao_get_ctrl_blk(). */
static int dao_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
958
959static int daio_mgr_enb_dai(void *blk, unsigned int idx)
960{
961 struct daio_mgr_ctrl_blk *ctl = blk;
962
963 set_field(&ctl->rxctl[idx], ARXCTL_EN, 1);
964 ctl->dirty.bf.arxctl |= (0x1 << idx);
965 return 0;
966}
967
968static int daio_mgr_dsb_dai(void *blk, unsigned int idx)
969{
970 struct daio_mgr_ctrl_blk *ctl = blk;
971
972 set_field(&ctl->rxctl[idx], ARXCTL_EN, 0);
973
974 ctl->dirty.bf.arxctl |= (0x1 << idx);
975 return 0;
976}
977
978static int daio_mgr_enb_dao(void *blk, unsigned int idx)
979{
980 struct daio_mgr_ctrl_blk *ctl = blk;
981
982 set_field(&ctl->txctl[idx], ATXCTL_EN, 1);
983 ctl->dirty.bf.atxctl |= (0x1 << idx);
984 return 0;
985}
986
987static int daio_mgr_dsb_dao(void *blk, unsigned int idx)
988{
989 struct daio_mgr_ctrl_blk *ctl = blk;
990
991 set_field(&ctl->txctl[idx], ATXCTL_EN, 0);
992 ctl->dirty.bf.atxctl |= (0x1 << idx);
993 return 0;
994}
995
/*
 * Configure transmitter 'idx' from the packed 'conf' word.
 * conf bits [2:0] select the S/PDIF channel count (1/2/4/8 -> NUC
 * 0..3; 0 selects CDIF mode), bit 4 drives the LIV/RIV bits and bit 3
 * feeds the RAW setting (but see the NOTE below).  Only S/PDIF
 * outputs (idx < 4) are programmed; I2S outputs are left untouched.
 */
static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
{
	struct daio_mgr_ctrl_blk *ctl = blk;

	if (idx < 4) {
		/* S/PDIF output */
		switch ((conf & 0x7)) {
		case 1:
			set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
			break;
		case 2:
			set_field(&ctl->txctl[idx], ATXCTL_NUC, 1);
			break;
		case 4:
			set_field(&ctl->txctl[idx], ATXCTL_NUC, 2);
			break;
		case 8:
			set_field(&ctl->txctl[idx], ATXCTL_NUC, 3);
			break;
		default:
			break;
		}
		/* CDIF mode when no channel count was requested */
		set_field(&ctl->txctl[idx], ATXCTL_CD, (!(conf & 0x7)));
		/* LIV - presumably left validity; driven by conf bit 4 */
		set_field(&ctl->txctl[idx], ATXCTL_LIV, (conf >> 4) & 0x1);
		/* RIV - presumably right validity; driven by conf bit 4 */
		set_field(&ctl->txctl[idx], ATXCTL_RIV, (conf >> 4) & 0x1);
		/*
		 * NOTE(review): both arms of this conditional are 0, so
		 * RAW is always cleared and conf bit 3 is effectively
		 * ignored - confirm whether "? 1 : 0" was intended.
		 */
		set_field(&ctl->txctl[idx], ATXCTL_RAW,
			  ((conf >> 3) & 0x1) ? 0 : 0);
		ctl->dirty.bf.atxctl |= (0x1 << idx);
	} else {
		/* I2S output */
		/*idx %= 4; */
	}
	return 0;
}
1033
1034static int daio_mgr_set_imaparc(void *blk, unsigned int slot)
1035{
1036 struct daio_mgr_ctrl_blk *ctl = blk;
1037
1038 set_field(&ctl->daoimap.aim, AIM_ARC, slot);
1039 ctl->dirty.bf.daoimap = 1;
1040 return 0;
1041}
1042
1043static int daio_mgr_set_imapnxt(void *blk, unsigned int next)
1044{
1045 struct daio_mgr_ctrl_blk *ctl = blk;
1046
1047 set_field(&ctl->daoimap.aim, AIM_NXT, next);
1048 ctl->dirty.bf.daoimap = 1;
1049 return 0;
1050}
1051
1052static int daio_mgr_set_imapaddr(void *blk, unsigned int addr)
1053{
1054 ((struct daio_mgr_ctrl_blk *)blk)->daoimap.idx = addr;
1055 ((struct daio_mgr_ctrl_blk *)blk)->dirty.bf.daoimap = 1;
1056 return 0;
1057}
1058
/*
 * Flush all dirty DAIO manager state to the hardware: each pending
 * per-channel TX and RX control word (register stride 0x40, with a
 * 1 ms settle delay after every write) and the pending audio-input
 * map entry.  Dirty bits are cleared as the writes complete.
 * Always returns 0.
 */
static int daio_mgr_commit_write(struct hw *hw, void *blk)
{
	struct daio_mgr_ctrl_blk *ctl = blk;
	unsigned int data;
	int i;

	for (i = 0; i < 8; i++) {
		if ((ctl->dirty.bf.atxctl & (0x1 << i))) {
			data = ctl->txctl[i];
			hw_write_20kx(hw, (AUDIO_IO_TX_CTL+(0x40*i)), data);
			ctl->dirty.bf.atxctl &= ~(0x1 << i);
			mdelay(1);
		}
		if ((ctl->dirty.bf.arxctl & (0x1 << i))) {
			data = ctl->rxctl[i];
			hw_write_20kx(hw, (AUDIO_IO_RX_CTL+(0x40*i)), data);
			ctl->dirty.bf.arxctl &= ~(0x1 << i);
			mdelay(1);
		}
	}
	if (ctl->dirty.bf.daoimap) {
		hw_write_20kx(hw, AUDIO_IO_AIM+ctl->daoimap.idx*4,
			      ctl->daoimap.aim);
		ctl->dirty.bf.daoimap = 0;
	}

	return 0;
}
1087
1088static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
1089{
1090 struct daio_mgr_ctrl_blk *blk;
1091 int i;
1092
1093 *rblk = NULL;
1094 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
1095 if (NULL == blk)
1096 return -ENOMEM;
1097
1098 for (i = 0; i < 8; i++) {
1099 blk->txctl[i] = hw_read_20kx(hw, AUDIO_IO_TX_CTL+(0x40*i));
1100 blk->rxctl[i] = hw_read_20kx(hw, AUDIO_IO_RX_CTL+(0x40*i));
1101 }
1102
1103 *rblk = blk;
1104
1105 return 0;
1106}
1107
/* Release a control block obtained from daio_mgr_get_ctrl_blk(). */
static int daio_mgr_put_ctrl_blk(void *blk)
{
	kfree(blk);
	return 0;
}
1114
/* Card hardware initialization parameter blocks */

/* DAC setup parameters (hw_dac_init) */
struct dac_conf {
	unsigned int msr; /* master sample rate in rsrs */
};

/* ADC setup parameters (hw_adc_init) */
struct adc_conf {
	unsigned int msr; /* master sample rate in rsrs */
	unsigned char input; /* the input source of ADC */
	unsigned char mic20db; /* boost mic by 20db if input is microphone */
};

/* DAIO setup parameters (hw_daio_init) */
struct daio_conf {
	unsigned int msr; /* master sample rate in rsrs */
};

/* Transport setup parameters (hw_trn_init) */
struct trn_conf {
	unsigned long vm_pgt_phys; /* physical address of the device page table */
};
1133
/*
 * One-time DAIO setup for the requested master sample rate
 * (info->msr: 1 = base rate, 2 = double rate; anything else is
 * rejected with -EINVAL).  Programs the I2S master/bit clocks, then
 * configures channels 0-3 as S/PDIF (including default channel
 * status) and channels 4-7 as I2S (SB0960 board layout).
 */
static int hw_daio_init(struct hw *hw, const struct daio_conf *info)
{
	u32 data;
	int i;

	/* Program I2S with proper sample rate and enable the correct I2S
	 * channel. ED(0/8/16/24): Enable all I2S/I2X master clock output */
	if (1 == info->msr) {
		hw_write_20kx(hw, AUDIO_IO_MCLK, 0x01010101);
		hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x01010101);
		hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
	} else if (2 == info->msr) {
		hw_write_20kx(hw, AUDIO_IO_MCLK, 0x11111111);
		/* Specify all playing 96khz
		 * EA [0] - Enabled
		 * RTA [4:5] - 96kHz
		 * EB [8] - Enabled
		 * RTB [12:13] - 96kHz
		 * EC [16] - Enabled
		 * RTC [20:21] - 96kHz
		 * ED [24] - Enabled
		 * RTD [28:29] - 96kHz */
		hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x11111111);
		hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
	} else {
		printk(KERN_ALERT "ctxfi: ERROR!!! Invalid sampling rate!!!\n");
		return -EINVAL;
	}

	for (i = 0; i < 8; i++) {
		if (i <= 3) {
			/* Channels 0-3 are S/PDIF (SB0960).
			 * NOTE(review): an earlier comment said "1st 3",
			 * but the code clearly programs four. */
			if (i == 3)
				data = 0x1001001;
			else
				data = 0x1000001;

			hw_write_20kx(hw, (AUDIO_IO_TX_CTL+(0x40*i)), data);
			hw_write_20kx(hw, (AUDIO_IO_RX_CTL+(0x40*i)), data);

			/* Initialize the SPDIF Out Channel status registers.
			 * The value specified here is based on the typical
			 * values provided in the specification, namely: Clock
			 * Accuracy of 1000ppm, Sample Rate of 48KHz,
			 * unspecified source number, Generation status = 1,
			 * Category code = 0x12 (Digital Signal Mixer),
			 * Mode = 0, Emph = 0, Copy Permitted, AN = 0
			 * (indicating that we're transmitting digital audio,
			 * and the Professional Use bit is 0. */

			hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_L+(0x40*i),
				      0x02109204); /* Default to 48kHz */

			hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_H+(0x40*i), 0x0B);
		} else {
			/* Channels 4-7 are I2S (SB0960).
			 * NOTE(review): an earlier comment said "next 5",
			 * but only four remain. */
			data = 0x11;
			hw_write_20kx(hw, AUDIO_IO_RX_CTL+(0x40*i), data);
			if (2 == info->msr) {
				/* Four channels per sample period */
				data |= 0x1000;
			}
			hw_write_20kx(hw, AUDIO_IO_TX_CTL+(0x40*i), data);
		}
	}

	return 0;
}
1202
/* TRANSPORT operations */

/*
 * Set up the DMA transport: point all 64 PTPAL/PTPAH register pairs
 * at the device page table, enable virtual-memory transfers (32-bit,
 * 4 KB pages; bits 8-9 flag a 64-bit host), then enable the transport
 * bus master, queueing, interrupts and the transport ring.
 * Returns -1 if the page-table address was never set (~0UL sentinel).
 */
static int hw_trn_init(struct hw *hw, const struct trn_conf *info)
{
	u32 vmctl, data;
	u32 ptp_phys_low, ptp_phys_high;
	int i;

	/* Set up device page table */
	if ((~0UL) == info->vm_pgt_phys) {
		printk(KERN_ALERT "ctxfi: "
		       "Wrong device page table page address!!!\n");
		return -1;
	}

	vmctl = 0x80000C0F;	/* 32-bit, 4k-size page */
	ptp_phys_low = (u32)info->vm_pgt_phys;
	ptp_phys_high = upper_32_bits(info->vm_pgt_phys);
	if (sizeof(void *) == 8) /* 64bit address */
		vmctl |= (3 << 8);
	/* Write page table physical address to all PTPAL registers */
	for (i = 0; i < 64; i++) {
		hw_write_20kx(hw, VMEM_PTPAL+(16*i), ptp_phys_low);
		hw_write_20kx(hw, VMEM_PTPAH+(16*i), ptp_phys_high);
	}
	/* Enable virtual memory transfer */
	hw_write_20kx(hw, VMEM_CTL, vmctl);
	/* Enable transport bus master and queueing of request */
	hw_write_20kx(hw, TRANSPORT_CTL, 0x03);
	hw_write_20kx(hw, TRANSPORT_INT, 0x200c01);
	/* Enable transport ring */
	data = hw_read_20kx(hw, TRANSPORT_ENB);
	hw_write_20kx(hw, TRANSPORT_ENB, (data | 0x03));

	return 0;
}
1238
/* Card initialization */

/* Global control register (GCTL) fields */
#define GCTL_AIE	0x00000001	/* auto-init enable (pulsed in hw_auto_init) */
#define GCTL_UAA	0x00000002
#define GCTL_DPC	0x00000004
#define GCTL_DBP	0x00000008
#define GCTL_ABP	0x00000010
#define GCTL_TBP	0x00000020
#define GCTL_SBP	0x00000040
#define GCTL_FBP	0x00000080
#define GCTL_ME		0x00000100
#define GCTL_AID	0x00001000	/* auto-init done (polled in hw_auto_init) */

/* PLL control register fields (written by hw_pll_init) */
#define PLLCTL_SRC	0x00000007
#define PLLCTL_SPE	0x00000008
#define PLLCTL_RD	0x000000F0
#define PLLCTL_FD	0x0001FF00
#define PLLCTL_OD	0x00060000
#define PLLCTL_B	0x00080000
#define PLLCTL_AS	0x00100000
#define PLLCTL_LF	0x03E00000
#define PLLCTL_SPS	0x1C000000
#define PLLCTL_AD	0x60000000

/* PLL status register fields (compared against PLLCTL in hw_pll_init) */
#define PLLSTAT_CCS	0x00000007	/* compared to PLLCTL_SRC */
#define PLLSTAT_SPL	0x00000008
#define PLLSTAT_CRD	0x000000F0	/* compared to PLLCTL_RD */
#define PLLSTAT_CFD	0x0001FF00	/* compared to PLLCTL_FD */
#define PLLSTAT_SL	0x00020000
#define PLLSTAT_FAS	0x00040000
#define PLLSTAT_B	0x00080000	/* compared to PLLCTL_B */
#define PLLSTAT_PD	0x00100000	/* must clear before PLL is accepted */
#define PLLSTAT_OCA	0x00200000
#define PLLSTAT_NCA	0x00400000
1272
/*
 * Program and lock the PLL for the given reference sample rate
 * (48000, or anything else treated as 44100).  After an initial
 * bring-up write and settle delay, the dividers are programmed and
 * the status register is polled up to 1000 times until it reflects
 * the configured B/SRC/RD/FD values and the PD bit clears.
 * Returns 0 on lock, -EBUSY when the PLL never settles.
 */
static int hw_pll_init(struct hw *hw, unsigned int rsr)
{
	unsigned int pllenb;
	unsigned int pllctl;
	unsigned int pllstat;
	int i;

	pllenb = 0xB;
	hw_write_20kx(hw, PLL_ENB, pllenb);
	pllctl = 0x20D00000;
	set_field(&pllctl, PLLCTL_FD, 16 - 4);
	hw_write_20kx(hw, PLL_CTL, pllctl);
	mdelay(40);
	pllctl = hw_read_20kx(hw, PLL_CTL);
	set_field(&pllctl, PLLCTL_B, 0);
	if (48000 == rsr) {
		set_field(&pllctl, PLLCTL_FD, 16 - 2);
		set_field(&pllctl, PLLCTL_RD, 1 - 1);
	} else { /* 44100 */
		set_field(&pllctl, PLLCTL_FD, 147 - 2);
		set_field(&pllctl, PLLCTL_RD, 10 - 1);
	}
	hw_write_20kx(hw, PLL_CTL, pllctl);
	mdelay(40);
	/* Wait for the status register to mirror the requested setup. */
	for (i = 0; i < 1000; i++) {
		pllstat = hw_read_20kx(hw, PLL_STAT);
		if (get_field(pllstat, PLLSTAT_PD))
			continue;

		if (get_field(pllstat, PLLSTAT_B) !=
		    get_field(pllctl, PLLCTL_B))
			continue;

		if (get_field(pllstat, PLLSTAT_CCS) !=
		    get_field(pllctl, PLLCTL_SRC))
			continue;

		if (get_field(pllstat, PLLSTAT_CRD) !=
		    get_field(pllctl, PLLCTL_RD))
			continue;

		if (get_field(pllstat, PLLSTAT_CFD) !=
		    get_field(pllctl, PLLCTL_FD))
			continue;

		break;
	}
	if (i >= 1000) {
		printk(KERN_ALERT "ctxfi: PLL initialization failed!!!\n");
		return -EBUSY;
	}

	return 0;
}
1327
/*
 * Kick the card's auto-initialization: pulse GCTL_AIE (clear then
 * set), then poll GCTL (up to 400000 reads) for the GCTL_AID done
 * flag.  Returns 0 on success, -EBUSY if the flag never asserts.
 */
static int hw_auto_init(struct hw *hw)
{
	unsigned int gctl;
	int i;

	gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
	set_field(&gctl, GCTL_AIE, 0);
	hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
	set_field(&gctl, GCTL_AIE, 1);
	hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
	mdelay(10);
	for (i = 0; i < 400000; i++) {
		gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
		if (get_field(gctl, GCTL_AID))
			break;
	}
	if (!get_field(gctl, GCTL_AID)) {
		printk(KERN_ALERT "ctxfi: Card Auto-init failed!!!\n");
		return -EBUSY;
	}

	return 0;
}
1351
/* DAC operations */

/* CS4382 DAC register map (consecutive I2C register indices) */
#define CS4382_MC1	0x1
#define CS4382_MC2	0x2
#define CS4382_MC3	0x3
#define CS4382_FC	0x4
#define CS4382_IC	0x5
#define CS4382_XC1	0x6
#define CS4382_VCA1	0x7
#define CS4382_VCB1	0x8
#define CS4382_XC2	0x9
#define CS4382_VCA2	0xA
#define CS4382_VCB2	0xB
#define CS4382_XC3	0xC
#define CS4382_VCA3	0xD
#define CS4382_VCB3	0xE
#define CS4382_XC4	0xF
#define CS4382_VCA4	0x10
#define CS4382_VCB4	0x11
#define CS4382_CREV	0x12

/* I2C status */
#define STATE_LOCKED	0x00
#define STATE_UNLOCKED	0xAA
#define DATA_READY	0x800000 /* Used with I2C_IF_STATUS */
#define DATA_ABORT	0x10000	 /* Used with I2C_IF_STATUS */

/* I2C_IF_STATUS register fields */
#define I2C_STATUS_DCM	0x00000001	/* direct control mode */
#define I2C_STATUS_BC	0x00000006	/* transfer byte count (see i2c read/write) */
#define I2C_STATUS_APD	0x00000008
#define I2C_STATUS_AB	0x00010000
#define I2C_STATUS_DR	0x00800000

/* I2C_IF_ADDRESS register fields */
#define I2C_ADDRESS_PTAD	0x0000FFFF
#define I2C_ADDRESS_SLAD	0x007F0000	/* slave device id */

/*
 * Shadow of the CS4382 register file; field order matches the
 * register indices CS4382_MC1..CS4382_VCB4.  Filled by read-back and
 * compared against the power-on defaults in hw_dac_init().
 */
struct regs_cs4382 {
	u32 mode_control_1;
	u32 mode_control_2;
	u32 mode_control_3;

	u32 filter_control;
	u32 invert_control;

	u32 mix_control_P1;
	u32 vol_control_A1;
	u32 vol_control_B1;

	u32 mix_control_P2;
	u32 vol_control_A2;
	u32 vol_control_B2;

	u32 mix_control_P3;
	u32 vol_control_A3;
	u32 vol_control_B3;

	u32 mix_control_P4;
	u32 vol_control_A4;
	u32 vol_control_B4;
};
1412
1413static int hw20k2_i2c_unlock_full_access(struct hw *hw)
1414{
1415 u8 UnlockKeySequence_FLASH_FULLACCESS_MODE[2] = {0xB3, 0xD4};
1416
1417 /* Send keys for forced BIOS mode */
1418 hw_write_20kx(hw, I2C_IF_WLOCK,
1419 UnlockKeySequence_FLASH_FULLACCESS_MODE[0]);
1420 hw_write_20kx(hw, I2C_IF_WLOCK,
1421 UnlockKeySequence_FLASH_FULLACCESS_MODE[1]);
1422 /* Check whether the chip is unlocked */
1423 if (hw_read_20kx(hw, I2C_IF_WLOCK) == STATE_UNLOCKED)
1424 return 0;
1425
1426 return -1;
1427}
1428
1429static int hw20k2_i2c_lock_chip(struct hw *hw)
1430{
1431 /* Write twice */
1432 hw_write_20kx(hw, I2C_IF_WLOCK, STATE_LOCKED);
1433 hw_write_20kx(hw, I2C_IF_WLOCK, STATE_LOCKED);
1434 if (hw_read_20kx(hw, I2C_IF_WLOCK) == STATE_LOCKED)
1435 return 0;
1436
1437 return -1;
1438}
1439
/*
 * Prepare the on-chip I2C interface for talking to device 'dev_id':
 * unlock it (propagating any failure), remember the address/data
 * sizes in the hw20k2 state (used by hw20k2_i2c_read/write to build
 * byte counts), target the slave address and switch the interface
 * into direct control mode.  Returns 0 on success.
 */
static int hw20k2_i2c_init(struct hw *hw, u8 dev_id, u8 addr_size, u8 data_size)
{
	struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
	int err;
	unsigned int i2c_status;
	unsigned int i2c_addr;

	err = hw20k2_i2c_unlock_full_access(hw);
	if (err < 0)
		return err;

	hw20k2->addr_size = addr_size;
	hw20k2->data_size = data_size;
	hw20k2->dev_id = dev_id;

	i2c_addr = 0;
	set_field(&i2c_addr, I2C_ADDRESS_SLAD, dev_id);

	hw_write_20kx(hw, I2C_IF_ADDRESS, i2c_addr);

	i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);

	set_field(&i2c_status, I2C_STATUS_DCM, 1); /* Direct control mode */

	hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);

	return 0;
}
1468
1469static int hw20k2_i2c_uninit(struct hw *hw)
1470{
1471 unsigned int i2c_status;
1472 unsigned int i2c_addr;
1473
1474 i2c_addr = 0;
1475 set_field(&i2c_addr, I2C_ADDRESS_SLAD, 0x57); /* I2C id */
1476
1477 hw_write_20kx(hw, I2C_IF_ADDRESS, i2c_addr);
1478
1479 i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
1480
1481 set_field(&i2c_status, I2C_STATUS_DCM, 0); /* I2C mode */
1482
1483 hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
1484
1485 return hw20k2_i2c_lock_chip(hw);
1486}
1487
1488static int hw20k2_i2c_wait_data_ready(struct hw *hw)
1489{
1490 int i = 0x400000;
1491 unsigned int ret;
1492
1493 do {
1494 ret = hw_read_20kx(hw, I2C_IF_STATUS);
1495 } while ((!(ret & DATA_READY)) && --i);
1496
1497 return i;
1498}
1499
/*
 * Read one value from the target device at register 'addr'.
 * Sets the transfer byte count from the configured address size (a
 * size of 4 is encoded as 0), writes the register address, then
 * issues a dummy read to clock the data in; every step waits for
 * DATA_READY.  Returns 0 with *datap filled on success, -1 on any
 * timeout.
 */
static int hw20k2_i2c_read(struct hw *hw, u16 addr, u32 *datap)
{
	struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
	unsigned int i2c_status;

	i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
	set_field(&i2c_status, I2C_STATUS_BC,
		  (4 == hw20k2->addr_size) ? 0 : hw20k2->addr_size);
	hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
	if (!hw20k2_i2c_wait_data_ready(hw))
		return -1;

	hw_write_20kx(hw, I2C_IF_WDATA, addr);
	if (!hw20k2_i2c_wait_data_ready(hw))
		return -1;

	/* Force a read operation */
	hw_write_20kx(hw, I2C_IF_RDATA, 0);
	if (!hw20k2_i2c_wait_data_ready(hw))
		return -1;

	*datap = hw_read_20kx(hw, I2C_IF_RDATA);

	return 0;
}
1525
/*
 * Write 'data' to register 'addr' of the target device.  The data is
 * shifted above the address bytes so both travel in one word; the
 * byte count covers address + data (4 encoded as 0).  A dummy write
 * triggers the transfer, then the real word is sent.
 * NOTE(review): unlike the read path, DATA_READY timeouts are ignored
 * here and 0 is always returned - confirm this is intentional.
 */
static int hw20k2_i2c_write(struct hw *hw, u16 addr, u32 data)
{
	struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
	unsigned int i2c_data = (data << (hw20k2->addr_size * 8)) | addr;
	unsigned int i2c_status;

	i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);

	set_field(&i2c_status, I2C_STATUS_BC,
		  (4 == (hw20k2->addr_size + hw20k2->data_size)) ?
		  0 : (hw20k2->addr_size + hw20k2->data_size));

	hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
	hw20k2_i2c_wait_data_ready(hw);
	/* Dummy write to trigger the write oprtation */
	hw_write_20kx(hw, I2C_IF_WDATA, 0);
	hw20k2_i2c_wait_data_ready(hw);

	/* This is the real data */
	hw_write_20kx(hw, I2C_IF_WDATA, i2c_data);
	hw20k2_i2c_wait_data_ready(hw);

	return 0;
}
1550
1551static int hw_dac_init(struct hw *hw, const struct dac_conf *info)
1552{
1553 int err;
1554 u32 data;
1555 int i;
1556 struct regs_cs4382 cs_read = {0};
1557 struct regs_cs4382 cs_def = {
1558 0x00000001, /* Mode Control 1 */
1559 0x00000000, /* Mode Control 2 */
1560 0x00000084, /* Mode Control 3 */
1561 0x00000000, /* Filter Control */
1562 0x00000000, /* Invert Control */
1563 0x00000024, /* Mixing Control Pair 1 */
1564 0x00000000, /* Vol Control A1 */
1565 0x00000000, /* Vol Control B1 */
1566 0x00000024, /* Mixing Control Pair 2 */
1567 0x00000000, /* Vol Control A2 */
1568 0x00000000, /* Vol Control B2 */
1569 0x00000024, /* Mixing Control Pair 3 */
1570 0x00000000, /* Vol Control A3 */
1571 0x00000000, /* Vol Control B3 */
1572 0x00000024, /* Mixing Control Pair 4 */
1573 0x00000000, /* Vol Control A4 */
1574 0x00000000 /* Vol Control B4 */
1575 };
1576
1577 /* Set DAC reset bit as output */
1578 data = hw_read_20kx(hw, GPIO_CTRL);
1579 data |= 0x02;
1580 hw_write_20kx(hw, GPIO_CTRL, data);
1581
1582 err = hw20k2_i2c_init(hw, 0x18, 1, 1);
1583 if (err < 0)
1584 goto End;
1585
1586 for (i = 0; i < 2; i++) {
1587 /* Reset DAC twice just in-case the chip
1588 * didn't initialized properly */
1589 data = hw_read_20kx(hw, GPIO_DATA);
1590 /* GPIO data bit 1 */
1591 data &= 0xFFFFFFFD;
1592 hw_write_20kx(hw, GPIO_DATA, data);
1593 mdelay(10);
1594 data |= 0x2;
1595 hw_write_20kx(hw, GPIO_DATA, data);
1596 mdelay(50);
1597
1598 /* Reset the 2nd time */
1599 data &= 0xFFFFFFFD;
1600 hw_write_20kx(hw, GPIO_DATA, data);
1601 mdelay(10);
1602 data |= 0x2;
1603 hw_write_20kx(hw, GPIO_DATA, data);
1604 mdelay(50);
1605
1606 if (hw20k2_i2c_read(hw, CS4382_MC1, &cs_read.mode_control_1))
1607 continue;
1608
1609 if (hw20k2_i2c_read(hw, CS4382_MC2, &cs_read.mode_control_2))
1610 continue;
1611
1612 if (hw20k2_i2c_read(hw, CS4382_MC3, &cs_read.mode_control_3))
1613 continue;
1614
1615 if (hw20k2_i2c_read(hw, CS4382_FC, &cs_read.filter_control))
1616 continue;
1617
1618 if (hw20k2_i2c_read(hw, CS4382_IC, &cs_read.invert_control))
1619 continue;
1620
1621 if (hw20k2_i2c_read(hw, CS4382_XC1, &cs_read.mix_control_P1))
1622 continue;
1623
1624 if (hw20k2_i2c_read(hw, CS4382_VCA1, &cs_read.vol_control_A1))
1625 continue;
1626
1627 if (hw20k2_i2c_read(hw, CS4382_VCB1, &cs_read.vol_control_B1))
1628 continue;
1629
1630 if (hw20k2_i2c_read(hw, CS4382_XC2, &cs_read.mix_control_P2))
1631 continue;
1632
1633 if (hw20k2_i2c_read(hw, CS4382_VCA2, &cs_read.vol_control_A2))
1634 continue;
1635
1636 if (hw20k2_i2c_read(hw, CS4382_VCB2, &cs_read.vol_control_B2))
1637 continue;
1638
1639 if (hw20k2_i2c_read(hw, CS4382_XC3, &cs_read.mix_control_P3))
1640 continue;
1641
1642 if (hw20k2_i2c_read(hw, CS4382_VCA3, &cs_read.vol_control_A3))
1643 continue;
1644
1645 if (hw20k2_i2c_read(hw, CS4382_VCB3, &cs_read.vol_control_B3))
1646 continue;
1647
1648 if (hw20k2_i2c_read(hw, CS4382_XC4, &cs_read.mix_control_P4))
1649 continue;
1650
1651 if (hw20k2_i2c_read(hw, CS4382_VCA4, &cs_read.vol_control_A4))
1652 continue;
1653
1654 if (hw20k2_i2c_read(hw, CS4382_VCB4, &cs_read.vol_control_B4))
1655 continue;
1656
1657 if (memcmp(&cs_read, &cs_def, sizeof(cs_read)))
1658 continue;
1659 else
1660 break;
1661 }
1662
1663 if (i >= 2)
1664 goto End;
1665
1666 /* Note: Every I2C write must have some delay.
1667 * This is not a requirement but the delay works here... */
1668 hw20k2_i2c_write(hw, CS4382_MC1, 0x80);
1669 hw20k2_i2c_write(hw, CS4382_MC2, 0x10);
1670 if (1 == info->msr) {
1671 hw20k2_i2c_write(hw, CS4382_XC1, 0x24);
1672 hw20k2_i2c_write(hw, CS4382_XC2, 0x24);
1673 hw20k2_i2c_write(hw, CS4382_XC3, 0x24);
1674 hw20k2_i2c_write(hw, CS4382_XC4, 0x24);
1675 } else if (2 == info->msr) {
1676 hw20k2_i2c_write(hw, CS4382_XC1, 0x25);
1677 hw20k2_i2c_write(hw, CS4382_XC2, 0x25);
1678 hw20k2_i2c_write(hw, CS4382_XC3, 0x25);
1679 hw20k2_i2c_write(hw, CS4382_XC4, 0x25);
1680 } else {
1681 hw20k2_i2c_write(hw, CS4382_XC1, 0x26);
1682 hw20k2_i2c_write(hw, CS4382_XC2, 0x26);
1683 hw20k2_i2c_write(hw, CS4382_XC3, 0x26);
1684 hw20k2_i2c_write(hw, CS4382_XC4, 0x26);
1685 }
1686
1687 return 0;
1688End:
1689
1690 hw20k2_i2c_uninit(hw);
1691 return -1;
1692}
1693
/* ADC operations */
/* Build the WM8775 I2C "address" byte: the 7-bit register index plus
 * the data MSB (the device presumably takes 9-bit register values -
 * confirm against the WM8775 datasheet). */
#define MAKE_WM8775_ADDR(addr, data)	(u32)(((addr<<1)&0xFE)|((data>>8)&0x1))
#define MAKE_WM8775_DATA(data)		(u32)(data&0xFF)  /* low 8 data bits */

/* WM8775 ADC register indices */
#define WM8775_IC	0x0B
#define WM8775_MMC	0x0C	/* written with the master-mode value in hw_adc_init */
#define WM8775_AADCL	0x0E	/* left gain (boost writes in input select) */
#define WM8775_AADCR	0x0F	/* right gain */
#define WM8775_ADCMC	0x15	/* input mux (0x101 mic / 0x102 line) */
#define WM8775_RESET	0x17
1704
1705static int hw_is_adc_input_selected(struct hw *hw, enum ADCSRC type)
1706{
1707 u32 data;
1708
1709 data = hw_read_20kx(hw, GPIO_DATA);
1710 switch (type) {
1711 case ADC_MICIN:
1712 data = (data & (0x1 << 14)) ? 1 : 0;
1713 break;
1714 case ADC_LINEIN:
1715 data = (data & (0x1 << 14)) ? 0 : 1;
1716 break;
1717 default:
1718 data = 0;
1719 }
1720 return data;
1721}
1722
/*
 * Route the ADC input to mic-in or line-in.
 * GPIO bit 14 switches the analog path (high = mic) and the WM8775 is
 * reprogrammed to match: mic selects mux value 0x101 with +12 dB gain
 * on both channels, line selects 0x102 with no boost.  Unknown source
 * types are ignored.  Always returns 0.
 */
static int hw_adc_input_select(struct hw *hw, enum ADCSRC type)
{
	u32 data;

	data = hw_read_20kx(hw, GPIO_DATA);
	switch (type) {
	case ADC_MICIN:
		data |= (0x1 << 14);
		hw_write_20kx(hw, GPIO_DATA, data);
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_ADCMC, 0x101),
				 MAKE_WM8775_DATA(0x101)); /* Mic-in */
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCL, 0xE7),
				 MAKE_WM8775_DATA(0xE7)); /* +12dB boost */
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCR, 0xE7),
				 MAKE_WM8775_DATA(0xE7)); /* +12dB boost */
		break;
	case ADC_LINEIN:
		data &= ~(0x1 << 14);
		hw_write_20kx(hw, GPIO_DATA, data);
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_ADCMC, 0x102),
				 MAKE_WM8775_DATA(0x102)); /* Line-in */
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCL, 0xCF),
				 MAKE_WM8775_DATA(0xCF)); /* No boost */
		hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCR, 0xCF),
				 MAKE_WM8775_DATA(0xCF)); /* No boost */
		break;
	default:
		break;
	}

	return 0;
}
1755
1756static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
1757{
1758 int err;
1759 u32 mux = 2, data, ctl;
1760
1761 /* Set ADC reset bit as output */
1762 data = hw_read_20kx(hw, GPIO_CTRL);
1763 data |= (0x1 << 15);
1764 hw_write_20kx(hw, GPIO_CTRL, data);
1765
1766 /* Initialize I2C */
1767 err = hw20k2_i2c_init(hw, 0x1A, 1, 1);
1768 if (err < 0) {
1769 printk(KERN_ALERT "ctxfi: Failure to acquire I2C!!!\n");
1770 goto error;
1771 }
1772
1773 /* Make ADC in normal operation */
1774 data = hw_read_20kx(hw, GPIO_DATA);
1775 data &= ~(0x1 << 15);
1776 mdelay(10);
1777 data |= (0x1 << 15);
1778 hw_write_20kx(hw, GPIO_DATA, data);
1779 mdelay(50);
1780
1781 /* Set the master mode (256fs) */
1782 if (1 == info->msr) {
1783 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_MMC, 0x02),
1784 MAKE_WM8775_DATA(0x02));
1785 } else if (2 == info->msr) {
1786 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_MMC, 0x0A),
1787 MAKE_WM8775_DATA(0x0A));
1788 } else {
1789 printk(KERN_ALERT "ctxfi: Invalid master sampling "
1790 "rate (msr %d)!!!\n", info->msr);
1791 err = -EINVAL;
1792 goto error;
1793 }
1794
1795 /* Configure GPIO bit 14 change to line-in/mic-in */
1796 ctl = hw_read_20kx(hw, GPIO_CTRL);
1797 ctl |= 0x1 << 14;
1798 hw_write_20kx(hw, GPIO_CTRL, ctl);
1799
1800 /* Check using Mic-in or Line-in */
1801 data = hw_read_20kx(hw, GPIO_DATA);
1802
1803 if (mux == 1) {
1804 /* Configures GPIO data to select Mic-in */
1805 data |= 0x1 << 14;
1806 hw_write_20kx(hw, GPIO_DATA, data);
1807
1808 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_ADCMC, 0x101),
1809 MAKE_WM8775_DATA(0x101)); /* Mic-in */
1810 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCL, 0xE7),
1811 MAKE_WM8775_DATA(0xE7)); /* +12dB boost */
1812 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCR, 0xE7),
1813 MAKE_WM8775_DATA(0xE7)); /* +12dB boost */
1814 } else if (mux == 2) {
1815 /* Configures GPIO data to select Line-in */
1816 data &= ~(0x1 << 14);
1817 hw_write_20kx(hw, GPIO_DATA, data);
1818
1819 /* Setup ADC */
1820 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_ADCMC, 0x102),
1821 MAKE_WM8775_DATA(0x102)); /* Line-in */
1822 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCL, 0xCF),
1823 MAKE_WM8775_DATA(0xCF)); /* No boost */
1824 hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCR, 0xCF),
1825 MAKE_WM8775_DATA(0xCF)); /* No boost */
1826 } else {
1827 printk(KERN_ALERT "ctxfi: ERROR!!! Invalid input mux!!!\n");
1828 err = -EINVAL;
1829 goto error;
1830 }
1831
1832 return 0;
1833
1834error:
1835 hw20k2_i2c_uninit(hw);
1836 return err;
1837}
1838
/* 20k2 cards have no dedicated digital-I/O switch; always report 0. */
static int hw_have_digit_io_switch(struct hw *hw)
{
	return 0;
}
1843
/*
 * Bring the PCI function up to the point where 20k2 registers are
 * reachable: enable the device, set DMA masks, claim the BARs, map the
 * register window (BAR 2) and switch the chip out of UAA mode into
 * native 20k2 mode.  IRQ installation is still disabled (commented out
 * below); hw->irq stays -1.  Error paths unwind in reverse order.
 */
static int hw_card_start(struct hw *hw)
{
	int err = 0;
	struct pci_dev *pci = hw->pci;
	unsigned int gctl;

	err = pci_enable_device(pci);
	if (err < 0)
		return err;

	/* Set DMA transfer mask */
	if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
	    pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
		printk(KERN_ERR "ctxfi: architecture does not support PCI "
		       "busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK);
		err = -ENXIO;
		goto error1;
	}

	err = pci_request_regions(pci, "XFi");
	if (err < 0)
		goto error1;

	/* 20k2 registers live in BAR 2; map the whole window. */
	hw->io_base = pci_resource_start(hw->pci, 2);
	hw->mem_base = (unsigned long)ioremap(hw->io_base,
					pci_resource_len(hw->pci, 2));
	if (NULL == (void *)hw->mem_base) {
		err = -ENOENT;
		goto error2;
	}

	/* Switch to 20k2 mode from UAA mode. */
	gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
	set_field(&gctl, GCTL_UAA, 0);
	hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);

	/*if ((err = request_irq(pci->irq, ct_atc_interrupt, IRQF_SHARED,
			atc->chip_details->nm_card, hw))) {
		goto error3;
	}
	hw->irq = pci->irq;
	*/

	pci_set_master(pci);

	return 0;

/*error3:
	iounmap((void *)hw->mem_base);
	hw->mem_base = (unsigned long)NULL;*/
error2:
	pci_release_regions(pci);
	hw->io_base = 0;
error1:
	pci_disable_device(pci);
	return err;
}
1901
/* Stop the card; currently a stub (interrupts are never enabled yet). */
static int hw_card_stop(struct hw *hw)
{
	/* TODO: Disable interrupt and so on... */
	return 0;
}
1907
1908static int hw_card_shutdown(struct hw *hw)
1909{
1910 if (hw->irq >= 0)
1911 free_irq(hw->irq, hw);
1912
1913 hw->irq = -1;
1914
1915 if (NULL != ((void *)hw->mem_base))
1916 iounmap((void *)hw->mem_base);
1917
1918 hw->mem_base = (unsigned long)NULL;
1919
1920 if (hw->io_base)
1921 pci_release_regions(hw->pci);
1922
1923 hw->io_base = 0;
1924
1925 pci_disable_device(hw->pci);
1926
1927 return 0;
1928}
1929
/*
 * Full chip initialization: ensure the register window is mapped, run
 * PLL and auto-init, program global control bits, quiesce interrupts,
 * set up GPIO, then initialize the transport, DAIO, DAC and ADC
 * sub-blocks in that order.  Returns 0 or the first negative error.
 * Ordering matters: later stages assume the earlier ones completed.
 */
static int hw_card_init(struct hw *hw, struct card_conf *info)
{
	int err;
	unsigned int gctl;
	u32 data = 0;
	struct dac_conf dac_info = {0};
	struct adc_conf adc_info = {0};
	struct daio_conf daio_info = {0};
	struct trn_conf trn_info = {0};

	/* Get PCI io port/memory base address and
	 * do 20kx core switch if needed. */
	if (!hw->io_base) {
		err = hw_card_start(hw);
		if (err)
			return err;
	}

	/* PLL init */
	err = hw_pll_init(hw, info->rsr);
	if (err < 0)
		return err;

	/* kick off auto-init */
	err = hw_auto_init(hw);
	if (err < 0)
		return err;

	/* Program global control bits (DBP/TBP/FBP set, DPC cleared). */
	gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
	set_field(&gctl, GCTL_DBP, 1);
	set_field(&gctl, GCTL_TBP, 1);
	set_field(&gctl, GCTL_FBP, 1);
	set_field(&gctl, GCTL_DPC, 0);
	hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);

	/* Reset all global pending interrupts */
	hw_write_20kx(hw, INTERRUPT_GIE, 0);
	/* Reset all SRC pending interrupts */
	hw_write_20kx(hw, SRC_IP, 0);

	/* TODO: detect the card ID and configure GPIO accordingly. */
	/* Configures GPIO (0xD802 0x98028) */
	/*hw_write_20kx(hw, GPIO_CTRL, 0x7F07);*/
	/* Configures GPIO (SB0880) */
	/*hw_write_20kx(hw, GPIO_CTRL, 0xFF07);*/
	hw_write_20kx(hw, GPIO_CTRL, 0xD802);

	/* Enable audio ring */
	hw_write_20kx(hw, MIXER_AR_ENABLE, 0x01);

	trn_info.vm_pgt_phys = info->vm_pgt_phys;
	err = hw_trn_init(hw, &trn_info);
	if (err < 0)
		return err;

	daio_info.msr = info->msr;
	err = hw_daio_init(hw, &daio_info);
	if (err < 0)
		return err;

	dac_info.msr = info->msr;
	err = hw_dac_init(hw, &dac_info);
	if (err < 0)
		return err;

	adc_info.msr = info->msr;
	adc_info.input = ADC_LINEIN;
	adc_info.mic20db = 0;
	err = hw_adc_init(hw, &adc_info);
	if (err < 0)
		return err;

	data = hw_read_20kx(hw, SRC_MCTL);
	data |= 0x1; /* Enables input from the audio ring */
	hw_write_20kx(hw, SRC_MCTL, data);

	return 0;
}
2008
/* Read a 32-bit 20k2 register at byte offset @reg in the mapped window.
 * NOTE(review): the cast drops the __iomem annotation — sparse will warn. */
static u32 hw_read_20kx(struct hw *hw, u32 reg)
{
	return readl((void *)(hw->mem_base + reg));
}
2013
/* Write a 32-bit 20k2 register at byte offset @reg in the mapped window. */
static void hw_write_20kx(struct hw *hw, u32 reg, u32 data)
{
	writel(data, (void *)(hw->mem_base + reg));
}
2018
/*
 * Template hw object for 20k2-based cards.  create_20k2_hw_obj() copies
 * this wholesale into each new instance; irq starts at -1 (no handler is
 * installed — see hw_card_start()).  All ops below are defined earlier in
 * this file.
 */
static struct hw ct20k2_preset __devinitdata = {
	.irq = -1,

	.card_init = hw_card_init,
	.card_stop = hw_card_stop,
	.pll_init = hw_pll_init,
	.is_adc_source_selected = hw_is_adc_input_selected,
	.select_adc_source = hw_adc_input_select,
	.have_digit_io_switch = hw_have_digit_io_switch,

	/* SRC (sample-rate converter) resource ops */
	.src_rsc_get_ctrl_blk = src_get_rsc_ctrl_blk,
	.src_rsc_put_ctrl_blk = src_put_rsc_ctrl_blk,
	.src_mgr_get_ctrl_blk = src_mgr_get_ctrl_blk,
	.src_mgr_put_ctrl_blk = src_mgr_put_ctrl_blk,
	.src_set_state = src_set_state,
	.src_set_bm = src_set_bm,
	.src_set_rsr = src_set_rsr,
	.src_set_sf = src_set_sf,
	.src_set_wr = src_set_wr,
	.src_set_pm = src_set_pm,
	.src_set_rom = src_set_rom,
	.src_set_vo = src_set_vo,
	.src_set_st = src_set_st,
	.src_set_ie = src_set_ie,
	.src_set_ilsz = src_set_ilsz,
	.src_set_bp = src_set_bp,
	.src_set_cisz = src_set_cisz,
	.src_set_ca = src_set_ca,
	.src_set_sa = src_set_sa,
	.src_set_la = src_set_la,
	.src_set_pitch = src_set_pitch,
	.src_set_dirty = src_set_dirty,
	.src_set_clear_zbufs = src_set_clear_zbufs,
	.src_set_dirty_all = src_set_dirty_all,
	.src_commit_write = src_commit_write,
	.src_get_ca = src_get_ca,
	.src_get_dirty = src_get_dirty,
	.src_dirty_conj_mask = src_dirty_conj_mask,
	.src_mgr_enbs_src = src_mgr_enbs_src,
	.src_mgr_enb_src = src_mgr_enb_src,
	.src_mgr_dsb_src = src_mgr_dsb_src,
	.src_mgr_commit_write = src_mgr_commit_write,

	/* SRC input mapper ops */
	.srcimp_mgr_get_ctrl_blk = srcimp_mgr_get_ctrl_blk,
	.srcimp_mgr_put_ctrl_blk = srcimp_mgr_put_ctrl_blk,
	.srcimp_mgr_set_imaparc = srcimp_mgr_set_imaparc,
	.srcimp_mgr_set_imapuser = srcimp_mgr_set_imapuser,
	.srcimp_mgr_set_imapnxt = srcimp_mgr_set_imapnxt,
	.srcimp_mgr_set_imapaddr = srcimp_mgr_set_imapaddr,
	.srcimp_mgr_commit_write = srcimp_mgr_commit_write,

	/* AMIXER (audio mixer) resource ops */
	.amixer_rsc_get_ctrl_blk = amixer_rsc_get_ctrl_blk,
	.amixer_rsc_put_ctrl_blk = amixer_rsc_put_ctrl_blk,
	.amixer_mgr_get_ctrl_blk = amixer_mgr_get_ctrl_blk,
	.amixer_mgr_put_ctrl_blk = amixer_mgr_put_ctrl_blk,
	.amixer_set_mode = amixer_set_mode,
	.amixer_set_iv = amixer_set_iv,
	.amixer_set_x = amixer_set_x,
	.amixer_set_y = amixer_set_y,
	.amixer_set_sadr = amixer_set_sadr,
	.amixer_set_se = amixer_set_se,
	.amixer_set_dirty = amixer_set_dirty,
	.amixer_set_dirty_all = amixer_set_dirty_all,
	.amixer_commit_write = amixer_commit_write,
	.amixer_get_y = amixer_get_y,
	.amixer_get_dirty = amixer_get_dirty,

	/* DAI (digital audio input) ops */
	.dai_get_ctrl_blk = dai_get_ctrl_blk,
	.dai_put_ctrl_blk = dai_put_ctrl_blk,
	.dai_srt_set_srco = dai_srt_set_srco,
	.dai_srt_set_srcm = dai_srt_set_srcm,
	.dai_srt_set_rsr = dai_srt_set_rsr,
	.dai_srt_set_drat = dai_srt_set_drat,
	.dai_srt_set_ec = dai_srt_set_ec,
	.dai_srt_set_et = dai_srt_set_et,
	.dai_commit_write = dai_commit_write,

	/* DAO (digital audio output) ops */
	.dao_get_ctrl_blk = dao_get_ctrl_blk,
	.dao_put_ctrl_blk = dao_put_ctrl_blk,
	.dao_set_spos = dao_set_spos,
	.dao_commit_write = dao_commit_write,
	.dao_get_spos = dao_get_spos,

	/* DAIO manager ops */
	.daio_mgr_get_ctrl_blk = daio_mgr_get_ctrl_blk,
	.daio_mgr_put_ctrl_blk = daio_mgr_put_ctrl_blk,
	.daio_mgr_enb_dai = daio_mgr_enb_dai,
	.daio_mgr_dsb_dai = daio_mgr_dsb_dai,
	.daio_mgr_enb_dao = daio_mgr_enb_dao,
	.daio_mgr_dsb_dao = daio_mgr_dsb_dao,
	.daio_mgr_dao_init = daio_mgr_dao_init,
	.daio_mgr_set_imaparc = daio_mgr_set_imaparc,
	.daio_mgr_set_imapnxt = daio_mgr_set_imapnxt,
	.daio_mgr_set_imapaddr = daio_mgr_set_imapaddr,
	.daio_mgr_commit_write = daio_mgr_commit_write,
};
2114
2115int __devinit create_20k2_hw_obj(struct hw **rhw)
2116{
2117 struct hw20k2 *hw20k2;
2118
2119 *rhw = NULL;
2120 hw20k2 = kzalloc(sizeof(*hw20k2), GFP_KERNEL);
2121 if (!hw20k2)
2122 return -ENOMEM;
2123
2124 hw20k2->hw = ct20k2_preset;
2125 *rhw = &hw20k2->hw;
2126
2127 return 0;
2128}
2129
/*
 * Free a hw object from create_20k2_hw_obj(), shutting the card down
 * first if it was ever started (io_base is nonzero only after
 * hw_card_start() succeeded).  Always returns 0.
 */
int destroy_20k2_hw_obj(struct hw *hw)
{
	if (hw->io_base)
		hw_card_shutdown(hw);

	kfree(hw);
	return 0;
}
diff --git a/sound/pci/ctxfi/cthw20k2.h b/sound/pci/ctxfi/cthw20k2.h
new file mode 100644
index 000000000000..d2b7daab6815
--- /dev/null
+++ b/sound/pci/ctxfi/cthw20k2.h
@@ -0,0 +1,26 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File cthw20k2.h
9 *
10 * @Brief
 * This file contains the definition of hardware access methods.
12 *
13 * @Author Liu Chun
14 * @Date May 13 2008
15 *
16 */
17
18#ifndef CTHW20K2_H
19#define CTHW20K2_H
20
21#include "cthardware.h"
22
23int create_20k2_hw_obj(struct hw **rhw);
24int destroy_20k2_hw_obj(struct hw *hw);
25
26#endif /* CTHW20K2_H */
diff --git a/sound/pci/ctxfi/ctimap.c b/sound/pci/ctxfi/ctimap.c
new file mode 100644
index 000000000000..0b73368a4df6
--- /dev/null
+++ b/sound/pci/ctxfi/ctimap.c
@@ -0,0 +1,112 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctimap.c
9 *
10 * @Brief
11 * This file contains the implementation of generic input mapper operations
12 * for input mapper management.
13 *
14 * @Author Liu Chun
15 * @Date May 23 2008
16 *
17 */
18
19#include "ctimap.h"
20#include <linux/slab.h>
21
/*
 * Insert @entry into @mappers, a list kept sorted by ->slot, and program
 * the hardware mapping through @map_op (invoked with @data and the entry
 * to write).  The on-chip map behaves as a circular chain: each entry's
 * ->next holds the ram address of its successor, so inserting rewrites
 * both the new entry and its predecessor.  Always returns 0.
 */
int input_mapper_add(struct list_head *mappers, struct imapper *entry,
		     int (*map_op)(void *, struct imapper *), void *data)
{
	struct list_head *pos, *pre, *head;
	struct imapper *pre_ent, *pos_ent;

	head = mappers;

	/* First entry links to itself: a one-element ring. */
	if (list_empty(head)) {
		entry->next = entry->addr;
		map_op(data, entry);
		list_add(&entry->list, head);
		return 0;
	}

	/* Find the first entry with a larger slot id. */
	list_for_each(pos, head) {
		pos_ent = list_entry(pos, struct imapper, list);
		if (pos_ent->slot > entry->slot) {
			/* found a position in list */
			break;
		}
	}

	if (pos != head) {
		/* Insert before @pos; when @pos is the first element the
		 * ring predecessor is the list tail. */
		pre = pos->prev;
		if (pre == head)
			pre = head->prev;

		__list_add(&entry->list, pos->prev, pos);
	} else {
		/* No larger slot found: append; the ring successor wraps
		 * around to the first element. */
		pre = head->prev;
		pos = head->next;
		list_add_tail(&entry->list, head);
	}

	pre_ent = list_entry(pre, struct imapper, list);
	pos_ent = list_entry(pos, struct imapper, list);

	/* Relink predecessor -> entry -> successor and program both. */
	entry->next = pos_ent->addr;
	map_op(data, entry);
	pre_ent->next = entry->addr;
	map_op(data, pre_ent);

	return 0;
}
67
/*
 * Remove @entry from @mappers and repair the circular hardware chain by
 * pointing the ring predecessor's ->next at the ring successor (both
 * reprogrammed through @map_op).  A sole remaining entry is cleared and
 * written out before removal.  Always returns 0.
 */
int input_mapper_delete(struct list_head *mappers, struct imapper *entry,
		     int (*map_op)(void *, struct imapper *), void *data)
{
	struct list_head *next, *pre, *head;
	struct imapper *pre_ent, *next_ent;

	head = mappers;

	if (list_empty(head))
		return 0;

	/* Ring neighbours: wrap around the list head sentinel. */
	pre = (entry->list.prev == head) ? head->prev : entry->list.prev;
	next = (entry->list.next == head) ? head->next : entry->list.next;

	if (pre == &entry->list) {
		/* entry is the only one node in mappers list */
		entry->next = entry->addr = entry->user = entry->slot = 0;
		map_op(data, entry);
		list_del(&entry->list);
		return 0;
	}

	pre_ent = list_entry(pre, struct imapper, list);
	next_ent = list_entry(next, struct imapper, list);

	/* Bypass @entry in the hardware chain, then unlink it. */
	pre_ent->next = next_ent->addr;
	map_op(data, pre_ent);
	list_del(&entry->list);

	return 0;
}
99
100void free_input_mapper_list(struct list_head *head)
101{
102 struct imapper *entry;
103 struct list_head *pos;
104
105 while (!list_empty(head)) {
106 pos = head->next;
107 list_del(pos);
108 entry = list_entry(pos, struct imapper, list);
109 kfree(entry);
110 }
111}
112
diff --git a/sound/pci/ctxfi/ctimap.h b/sound/pci/ctxfi/ctimap.h
new file mode 100644
index 000000000000..53ccf9be8b68
--- /dev/null
+++ b/sound/pci/ctxfi/ctimap.h
@@ -0,0 +1,40 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctimap.h
9 *
10 * @Brief
11 * This file contains the definition of generic input mapper operations
12 * for input mapper management.
13 *
14 * @Author Liu Chun
15 * @Date May 23 2008
16 *
17 */
18
19#ifndef CTIMAP_H
20#define CTIMAP_H
21
22#include <linux/list.h>
23
/*
 * One entry of the hardware input-mapper ram: routes the data found in
 * @slot to the consuming resource @user.  Entries live on a list sorted
 * by slot; @next chains the ram entries into a ring (see ctimap.c).
 */
struct imapper {
	unsigned short slot; /* the id of the slot containing input data */
	unsigned short user; /* the id of the user resource consuming data */
	unsigned short addr; /* the input mapper ram id */
	unsigned short next; /* the next input mapper ram id */
	struct list_head list;
};
31
32int input_mapper_add(struct list_head *mappers, struct imapper *entry,
33 int (*map_op)(void *, struct imapper *), void *data);
34
35int input_mapper_delete(struct list_head *mappers, struct imapper *entry,
36 int (*map_op)(void *, struct imapper *), void *data);
37
38void free_input_mapper_list(struct list_head *mappers);
39
40#endif /* CTIMAP_H */
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
new file mode 100644
index 000000000000..666722d9de41
--- /dev/null
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -0,0 +1,1123 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctmixer.c
9 *
10 * @Brief
11 * This file contains the implementation of alsa mixer device functions.
12 *
13 * @Author Liu Chun
14 * @Date May 28 2008
15 *
16 */
17
18
19#include "ctmixer.h"
20#include "ctamixer.h"
21#include <linux/slab.h>
22#include <sound/core.h>
23#include <sound/control.h>
24#include <sound/asoundef.h>
25#include <sound/pcm.h>
26#include <sound/tlv.h>
27
/* Summing inputs — F/R/C/S presumably front/rear/center/side playback,
 * plus one capture sum (the _C suffix mirrors the capture amixers). */
enum CT_SUM_CTL {
	SUM_IN_F,
	SUM_IN_R,
	SUM_IN_C,
	SUM_IN_S,
	SUM_IN_F_C,

	NUM_CT_SUMS
};
37
/*
 * Hardware amixer indices.  The _C-suffixed entries are the capture-path
 * counterparts of the playback amixers.  NOTE: ct_alsa_mix_volume_put()
 * relies on MASTER_F..MASTER_S and PCM_F..PCM_S being consecutive (it
 * synchronizes type+1..type+3), so do not reorder these.
 */
enum CT_AMIXER_CTL {
	/* volume control mixers */
	AMIXER_MASTER_F,
	AMIXER_MASTER_R,
	AMIXER_MASTER_C,
	AMIXER_MASTER_S,
	AMIXER_PCM_F,
	AMIXER_PCM_R,
	AMIXER_PCM_C,
	AMIXER_PCM_S,
	AMIXER_SPDIFI,
	AMIXER_LINEIN,
	AMIXER_MIC,
	AMIXER_SPDIFO,
	AMIXER_WAVE_F,
	AMIXER_WAVE_R,
	AMIXER_WAVE_C,
	AMIXER_WAVE_S,
	AMIXER_MASTER_F_C,
	AMIXER_PCM_F_C,
	AMIXER_SPDIFI_C,
	AMIXER_LINEIN_C,
	AMIXER_MIC_C,

	/* this should always be the last one */
	NUM_CT_AMIXERS
};
65
/*
 * ALSA-visible mixer controls.  The *_P/*_C suffixes are playback/capture
 * volumes; *_S entries are on/off switches.  The VOL_/SWH_ range macros
 * below depend on the grouping here, and switch state is tracked as a
 * bitmask indexed by (type - SWH_MIXER_START) — keep the groups packed.
 */
enum CTALSA_MIXER_CTL {
	/* volume control mixers */
	MIXER_MASTER_P,
	MIXER_PCM_P,
	MIXER_LINEIN_P,
	MIXER_MIC_P,
	MIXER_SPDIFI_P,
	MIXER_SPDIFO_P,
	MIXER_WAVEF_P,
	MIXER_WAVER_P,
	MIXER_WAVEC_P,
	MIXER_WAVES_P,
	MIXER_MASTER_C,
	MIXER_PCM_C,
	MIXER_LINEIN_C,
	MIXER_MIC_C,
	MIXER_SPDIFI_C,

	/* switch control mixers */
	MIXER_PCM_C_S,
	MIXER_LINEIN_C_S,
	MIXER_MIC_C_S,
	MIXER_SPDIFI_C_S,
	MIXER_LINEIN_P_S,
	MIXER_SPDIFO_P_S,
	MIXER_SPDIFI_P_S,
	MIXER_WAVEF_P_S,
	MIXER_WAVER_P_S,
	MIXER_WAVEC_P_S,
	MIXER_WAVES_P_S,
	MIXER_DIGITAL_IO_S,
	MIXER_IEC958_MASK,
	MIXER_IEC958_DEFAULT,
	MIXER_IEC958_STREAM,

	/* this should always be the last one */
	NUM_CTALSA_MIXERS
};

/* Inclusive index ranges for the volume, switch and capture-switch groups. */
#define VOL_MIXER_START		MIXER_MASTER_P
#define VOL_MIXER_END		MIXER_SPDIFI_C
#define VOL_MIXER_NUM		(VOL_MIXER_END - VOL_MIXER_START + 1)
#define SWH_MIXER_START		MIXER_PCM_C_S
#define SWH_MIXER_END		MIXER_DIGITAL_IO_S
#define SWH_CAPTURE_START	MIXER_PCM_C_S
#define SWH_CAPTURE_END		MIXER_SPDIFI_C_S

/* Stereo: two amixers (left/right) per logical control. */
#define CHN_NUM		2

/* One row of the control bootstrap table below: @ctl nonzero means the
 * control is actually created; @name is its ALSA control name. */
struct ct_kcontrol_init {
	unsigned char ctl;
	char *name;
};
119
/*
 * Which ALSA controls to create and under what names, indexed by
 * CTALSA_MIXER_CTL.  Entries absent from the table (e.g. the IEC958
 * controls, created separately) default to { .ctl = 0 } and are skipped
 * by ct_mixer_kcontrols_create().
 */
static struct ct_kcontrol_init
ct_kcontrol_init_table[NUM_CTALSA_MIXERS] = {
	[MIXER_MASTER_P] = {
		.ctl = 1,
		.name = "Master Playback Volume",
	},
	[MIXER_MASTER_C] = {
		.ctl = 1,
		.name = "Master Capture Volume",
	},
	[MIXER_PCM_P] = {
		.ctl = 1,
		.name = "PCM Playback Volume",
	},
	[MIXER_PCM_C] = {
		.ctl = 1,
		.name = "PCM Capture Volume",
	},
	[MIXER_LINEIN_P] = {
		.ctl = 1,
		.name = "Line-in Playback Volume",
	},
	[MIXER_LINEIN_C] = {
		.ctl = 1,
		.name = "Line-in Capture Volume",
	},
	[MIXER_MIC_P] = {
		.ctl = 1,
		.name = "Mic Playback Volume",
	},
	[MIXER_MIC_C] = {
		.ctl = 1,
		.name = "Mic Capture Volume",
	},
	[MIXER_SPDIFI_P] = {
		.ctl = 1,
		.name = "S/PDIF-in Playback Volume",
	},
	[MIXER_SPDIFI_C] = {
		.ctl = 1,
		.name = "S/PDIF-in Capture Volume",
	},
	[MIXER_SPDIFO_P] = {
		.ctl = 1,
		.name = "S/PDIF-out Playback Volume",
	},
	[MIXER_WAVEF_P] = {
		.ctl = 1,
		.name = "Front Playback Volume",
	},
	[MIXER_WAVES_P] = {
		.ctl = 1,
		.name = "Side Playback Volume",
	},
	[MIXER_WAVEC_P] = {
		.ctl = 1,
		.name = "Center/LFE Playback Volume",
	},
	[MIXER_WAVER_P] = {
		.ctl = 1,
		.name = "Surround Playback Volume",
	},

	[MIXER_PCM_C_S] = {
		.ctl = 1,
		.name = "PCM Capture Switch",
	},
	[MIXER_LINEIN_C_S] = {
		.ctl = 1,
		.name = "Line-in Capture Switch",
	},
	[MIXER_MIC_C_S] = {
		.ctl = 1,
		.name = "Mic Capture Switch",
	},
	[MIXER_SPDIFI_C_S] = {
		.ctl = 1,
		.name = "S/PDIF-in Capture Switch",
	},
	[MIXER_LINEIN_P_S] = {
		.ctl = 1,
		.name = "Line-in Playback Switch",
	},
	[MIXER_SPDIFO_P_S] = {
		.ctl = 1,
		.name = "S/PDIF-out Playback Switch",
	},
	[MIXER_SPDIFI_P_S] = {
		.ctl = 1,
		.name = "S/PDIF-in Playback Switch",
	},
	[MIXER_WAVEF_P_S] = {
		.ctl = 1,
		.name = "Front Playback Switch",
	},
	[MIXER_WAVES_P_S] = {
		.ctl = 1,
		.name = "Side Playback Switch",
	},
	[MIXER_WAVEC_P_S] = {
		.ctl = 1,
		.name = "Center/LFE Playback Switch",
	},
	[MIXER_WAVER_P_S] = {
		.ctl = 1,
		.name = "Surround Playback Switch",
	},
	[MIXER_DIGITAL_IO_S] = {
		.ctl = 0,
		.name = "Digit-IO Playback Switch",
	},
};
232
static void
ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type);

static void
ct_mixer_recording_unselect(struct ct_mixer *mixer, enum CT_AMIXER_CTL type);

/* kctls[0] = Line-in capture switch, kctls[1] = Mic capture switch;
 * cached by ct_mixer_kcontrol_new() so do_line_mic_switch() can notify
 * the opposite control when one deselects the other. */
static struct snd_kcontrol *kctls[2] = {NULL};
240
/*
 * Map an ALSA mixer control id to the hardware amixer that backs it.
 * Capture volumes and their switches share one amixer; controls with no
 * backing amixer (IEC958 etc.) map to the NUM_CT_AMIXERS sentinel.
 */
static enum CT_AMIXER_CTL get_amixer_index(enum CTALSA_MIXER_CTL alsa_index)
{
	switch (alsa_index) {
	case MIXER_MASTER_P:	return AMIXER_MASTER_F;
	case MIXER_MASTER_C:	return AMIXER_MASTER_F_C;
	case MIXER_PCM_P:	return AMIXER_PCM_F;
	case MIXER_PCM_C:
	case MIXER_PCM_C_S:	return AMIXER_PCM_F_C;
	case MIXER_LINEIN_P:	return AMIXER_LINEIN;
	case MIXER_LINEIN_C:
	case MIXER_LINEIN_C_S:	return AMIXER_LINEIN_C;
	case MIXER_MIC_P:	return AMIXER_MIC;
	case MIXER_MIC_C:
	case MIXER_MIC_C_S:	return AMIXER_MIC_C;
	case MIXER_SPDIFI_P:	return AMIXER_SPDIFI;
	case MIXER_SPDIFI_C:
	case MIXER_SPDIFI_C_S:	return AMIXER_SPDIFI_C;
	case MIXER_SPDIFO_P:	return AMIXER_SPDIFO;
	case MIXER_WAVEF_P:	return AMIXER_WAVE_F;
	case MIXER_WAVES_P:	return AMIXER_WAVE_S;
	case MIXER_WAVEC_P:	return AMIXER_WAVE_C;
	case MIXER_WAVER_P:	return AMIXER_WAVE_R;
	default:		return NUM_CT_AMIXERS;
	}
}
266
267static enum CT_AMIXER_CTL get_recording_amixer(enum CT_AMIXER_CTL index)
268{
269 switch (index) {
270 case AMIXER_MASTER_F: return AMIXER_MASTER_F_C;
271 case AMIXER_PCM_F: return AMIXER_PCM_F_C;
272 case AMIXER_SPDIFI: return AMIXER_SPDIFI_C;
273 case AMIXER_LINEIN: return AMIXER_LINEIN_C;
274 case AMIXER_MIC: return AMIXER_MIC_C;
275 default: return NUM_CT_AMIXERS;
276 }
277}
278
279static unsigned char
280get_switch_state(struct ct_mixer *mixer, enum CTALSA_MIXER_CTL type)
281{
282 return (mixer->switch_state & (0x1 << (type - SWH_MIXER_START)))
283 ? 1 : 0;
284}
285
286static void
287set_switch_state(struct ct_mixer *mixer,
288 enum CTALSA_MIXER_CTL type, unsigned char state)
289{
290 if (state)
291 mixer->switch_state |= (0x1 << (type - SWH_MIXER_START));
292 else
293 mixer->switch_state &= ~(0x1 << (type - SWH_MIXER_START));
294}
295
#if 0 /* not used */
/* Map integer value ranging from 0 to 65535 to 14-bit float value ranging
 * from 2^-6 to (1+1023/1024) */
/* Compiled out: kept for reference only; these conversions are not used
 * by the current volume path (see VOL_SCALE in ct_alsa_mix_volume_*). */
static unsigned int uint16_to_float14(unsigned int x)
{
	unsigned int i;

	if (x < 17)
		return 0;

	x *= 2031;
	x /= 65535;
	x += 16;

	/* i <= 6 */
	for (i = 0; !(x & 0x400); i++)
		x <<= 1;

	x = (((7 - i) & 0x7) << 10) | (x & 0x3ff);

	return x;
}

/* Inverse of uint16_to_float14(). */
static unsigned int float14_to_uint16(unsigned int x)
{
	unsigned int e;

	if (!x)
		return x;

	e = (x >> 10) & 0x7;
	x &= 0x3ff;
	x += 1024;
	x >>= (7 - e);
	x -= 16;
	x *= 65535;
	x /= 2031;

	return x;
}
#endif /* not used */
337
/* ALSA volume values (0..VOL_MAX) are multiplied by VOL_SCALE before
 * being written to the amixer (see ct_alsa_mix_volume_put()). */
#define VOL_SCALE	0x1c
#define VOL_MAX		0x100

/* dB mapping: -64 dB..0 dB in 0.25 dB steps, minimum value is mute. */
static const DECLARE_TLV_DB_SCALE(ct_vol_db_scale, -6400, 25, 1);
342
343static int ct_alsa_mix_volume_info(struct snd_kcontrol *kcontrol,
344 struct snd_ctl_elem_info *uinfo)
345{
346 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
347 uinfo->count = 2;
348 uinfo->value.integer.min = 0;
349 uinfo->value.integer.max = VOL_MAX;
350
351 return 0;
352}
353
354static int ct_alsa_mix_volume_get(struct snd_kcontrol *kcontrol,
355 struct snd_ctl_elem_value *ucontrol)
356{
357 struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
358 enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value);
359 struct amixer *amixer;
360 int i, val;
361
362 for (i = 0; i < 2; i++) {
363 amixer = ((struct ct_mixer *)atc->mixer)->
364 amixers[type*CHN_NUM+i];
365 val = amixer->ops->get_scale(amixer) / VOL_SCALE;
366 if (val < 0)
367 val = 0;
368 else if (val > VOL_MAX)
369 val = VOL_MAX;
370 ucontrol->value.integer.value[i] = val;
371 }
372
373 return 0;
374}
375
/*
 * Write the left/right volume of this control's amixer pair.  Values are
 * clamped to 0..VOL_MAX and scaled by VOL_SCALE before hitting hardware.
 * Master and PCM front volumes fan out to their R/C/S siblings (relies
 * on those enum values being consecutive).  Returns 1 if anything
 * changed, 0 otherwise, per ALSA put() convention.
 */
static int ct_alsa_mix_volume_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	struct ct_mixer *mixer = atc->mixer;
	enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value);
	struct amixer *amixer;
	int i, j, val, oval, change = 0;

	for (i = 0; i < 2; i++) {
		val = ucontrol->value.integer.value[i];
		if (val < 0)
			val = 0;
		else if (val > VOL_MAX)
			val = VOL_MAX;
		val *= VOL_SCALE;
		amixer = mixer->amixers[type*CHN_NUM+i];
		oval = amixer->ops->get_scale(amixer);
		if (val != oval) {
			amixer->ops->set_scale(amixer, val);
			amixer->ops->commit_write(amixer);
			change = 1;
			/* Synchronize Master/PCM playback AMIXERs. */
			if (AMIXER_MASTER_F == type || AMIXER_PCM_F == type) {
				for (j = 1; j < 4; j++) {
					amixer = mixer->
						amixers[(type+j)*CHN_NUM+i];
					amixer->ops->set_scale(amixer, val);
					amixer->ops->commit_write(amixer);
				}
			}
		}
	}

	return change;
}
412
/* Template for all volume kcontrols; .name and .private_value are filled
 * in per control by ct_mixer_kcontrols_create(). */
static struct snd_kcontrol_new vol_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = ct_alsa_mix_volume_info,
	.get = ct_alsa_mix_volume_get,
	.put = ct_alsa_mix_volume_put,
	.tlv = { .p = ct_vol_db_scale },
};
422
423static void
424do_line_mic_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type)
425{
426
427 if (MIXER_LINEIN_C_S == type) {
428 atc->select_line_in(atc);
429 set_switch_state(atc->mixer, MIXER_MIC_C_S, 0);
430 snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE,
431 &kctls[1]->id);
432 } else if (MIXER_MIC_C_S == type) {
433 atc->select_mic_in(atc);
434 set_switch_state(atc->mixer, MIXER_LINEIN_C_S, 0);
435 snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE,
436 &kctls[0]->id);
437 }
438}
439
440static void
441do_digit_io_switch(struct ct_atc *atc, int state)
442{
443 struct ct_mixer *mixer = atc->mixer;
444
445 if (state) {
446 atc->select_digit_io(atc);
447 atc->spdif_out_unmute(atc,
448 get_switch_state(mixer, MIXER_SPDIFO_P_S));
449 atc->spdif_in_unmute(atc, 1);
450 atc->line_in_unmute(atc, 0);
451 return;
452 }
453
454 if (get_switch_state(mixer, MIXER_LINEIN_C_S))
455 atc->select_line_in(atc);
456 else if (get_switch_state(mixer, MIXER_MIC_C_S))
457 atc->select_mic_in(atc);
458
459 atc->spdif_out_unmute(atc, 0);
460 atc->spdif_in_unmute(atc, 0);
461 atc->line_in_unmute(atc, 1);
462 return;
463}
464
465static int ct_alsa_mix_switch_info(struct snd_kcontrol *kcontrol,
466 struct snd_ctl_elem_info *uinfo)
467{
468 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
469 uinfo->count = 1;
470 uinfo->value.integer.min = 0;
471 uinfo->value.integer.max = 1;
472 uinfo->value.integer.step = 1;
473
474 return 0;
475}
476
477static int ct_alsa_mix_switch_get(struct snd_kcontrol *kcontrol,
478 struct snd_ctl_elem_value *ucontrol)
479{
480 struct ct_mixer *mixer =
481 ((struct ct_atc *)snd_kcontrol_chip(kcontrol))->mixer;
482 enum CTALSA_MIXER_CTL type = kcontrol->private_value;
483
484 ucontrol->value.integer.value[0] = get_switch_state(mixer, type);
485 return 0;
486}
487
/*
 * Apply a switch change: update the cached state, add/remove the control
 * from the capture sum for capture switches, then apply the matching
 * hardware action (mute lines, S/PDIF routing, digital I/O, line/mic
 * exclusivity).  Returns 0 if the state was unchanged, else 1.
 */
static int ct_alsa_mix_switch_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	struct ct_mixer *mixer = atc->mixer;
	enum CTALSA_MIXER_CTL type = kcontrol->private_value;
	int state;

	state = ucontrol->value.integer.value[0];
	if (get_switch_state(mixer, type) == state)
		return 0;

	set_switch_state(mixer, type, state);
	/* Do changes in mixer. */
	if ((SWH_CAPTURE_START <= type) && (SWH_CAPTURE_END >= type)) {
		if (state) {
			ct_mixer_recording_select(mixer,
						  get_amixer_index(type));
		} else {
			ct_mixer_recording_unselect(mixer,
						    get_amixer_index(type));
		}
	}
	/* Do changes out of mixer. */
	if (state && (MIXER_LINEIN_C_S == type || MIXER_MIC_C_S == type))
		do_line_mic_switch(atc, type);
	else if (MIXER_WAVEF_P_S == type)
		atc->line_front_unmute(atc, state);
	else if (MIXER_WAVES_P_S == type)
		atc->line_surround_unmute(atc, state);
	else if (MIXER_WAVEC_P_S == type)
		atc->line_clfe_unmute(atc, state);
	else if (MIXER_WAVER_P_S == type)
		atc->line_rear_unmute(atc, state);
	else if (MIXER_LINEIN_P_S == type)
		atc->line_in_unmute(atc, state);
	else if (MIXER_SPDIFO_P_S == type)
		atc->spdif_out_unmute(atc, state);
	else if (MIXER_SPDIFI_P_S == type)
		atc->spdif_in_unmute(atc, state);
	else if (MIXER_DIGITAL_IO_S == type)
		do_digit_io_switch(atc, state);

	return 1;
}
533
/* Template for all switch kcontrols; .name and .private_value are filled
 * in per control by ct_mixer_kcontrols_create(). */
static struct snd_kcontrol_new swh_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = ct_alsa_mix_switch_info,
	.get = ct_alsa_mix_switch_get,
	.put = ct_alsa_mix_switch_put
};
541
542static int ct_spdif_info(struct snd_kcontrol *kcontrol,
543 struct snd_ctl_elem_info *uinfo)
544{
545 uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
546 uinfo->count = 1;
547 return 0;
548}
549
550static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol,
551 struct snd_ctl_elem_value *ucontrol)
552{
553 ucontrol->value.iec958.status[0] = 0xff;
554 ucontrol->value.iec958.status[1] = 0xff;
555 ucontrol->value.iec958.status[2] = 0xff;
556 ucontrol->value.iec958.status[3] = 0xff;
557 return 0;
558}
559
560static int ct_spdif_default_get(struct snd_kcontrol *kcontrol,
561 struct snd_ctl_elem_value *ucontrol)
562{
563 unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF;
564
565 ucontrol->value.iec958.status[0] = (status >> 0) & 0xff;
566 ucontrol->value.iec958.status[1] = (status >> 8) & 0xff;
567 ucontrol->value.iec958.status[2] = (status >> 16) & 0xff;
568 ucontrol->value.iec958.status[3] = (status >> 24) & 0xff;
569
570 return 0;
571}
572
573static int ct_spdif_get(struct snd_kcontrol *kcontrol,
574 struct snd_ctl_elem_value *ucontrol)
575{
576 struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
577 unsigned int status;
578
579 atc->spdif_out_get_status(atc, &status);
580 ucontrol->value.iec958.status[0] = (status >> 0) & 0xff;
581 ucontrol->value.iec958.status[1] = (status >> 8) & 0xff;
582 ucontrol->value.iec958.status[2] = (status >> 16) & 0xff;
583 ucontrol->value.iec958.status[3] = (status >> 24) & 0xff;
584
585 return 0;
586}
587
588static int ct_spdif_put(struct snd_kcontrol *kcontrol,
589 struct snd_ctl_elem_value *ucontrol)
590{
591 struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
592 int change;
593 unsigned int status, old_status;
594
595 status = (ucontrol->value.iec958.status[0] << 0) |
596 (ucontrol->value.iec958.status[1] << 8) |
597 (ucontrol->value.iec958.status[2] << 16) |
598 (ucontrol->value.iec958.status[3] << 24);
599
600 atc->spdif_out_get_status(atc, &old_status);
601 change = (old_status != status);
602 if (change)
603 atc->spdif_out_set_status(atc, status);
604
605 return change;
606}
607
/* Read-only IEC958 mask element: tells userspace which status bits
 * may be modified (all of them — see ct_spdif_get_mask()). */
static struct snd_kcontrol_new iec958_mask_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
	.count = 1,
	.info = ct_spdif_info,
	.get = ct_spdif_get_mask,
	.private_value = MIXER_IEC958_MASK
};
617
/* IEC958 default element: reads the compile-time default status word,
 * writes go straight to the hardware via ct_spdif_put(). */
static struct snd_kcontrol_new iec958_default_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
	.count = 1,
	.info = ct_spdif_info,
	.get = ct_spdif_default_get,
	.put = ct_spdif_put,
	.private_value = MIXER_IEC958_DEFAULT
};
627
/* Read/write IEC958 per-stream element reflecting the live hardware
 * S/PDIF output status. */
static struct snd_kcontrol_new iec958_ctl = {
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM),
	.count = 1,
	.info = ct_spdif_info,
	.get = ct_spdif_get,
	.put = ct_spdif_put,
	.private_value = MIXER_IEC958_STREAM
};
638
639#define NUM_IEC958_CTL 3
640
/* Instantiate one control from template @new and register it with the
 * card.  Returns 0 on success or a negative error code. */
static int
ct_mixer_kcontrol_new(struct ct_mixer *mixer, struct snd_kcontrol_new *new)
{
	struct snd_kcontrol *kctl;
	int err;

	kctl = snd_ctl_new1(new, mixer->atc);
	if (NULL == kctl)
		return -ENOMEM;

	/* PCM-iface controls (the IEC958 set) belong to the S/PDIF device. */
	if (SNDRV_CTL_ELEM_IFACE_PCM == kctl->id.iface)
		kctl->id.device = IEC958;

	/* Per the ALSA control API, snd_ctl_add() takes ownership of kctl
	 * and frees it itself on failure — no kfree needed here. */
	err = snd_ctl_add(mixer->atc->card, kctl);
	if (err)
		return err;

	/* Cache the line-in / mic capture-switch controls in the file-scope
	 * kctls[] table (declared outside this chunk); presumably used to
	 * push switch-state notifications later. */
	switch (new->private_value) {
	case MIXER_LINEIN_C_S:
		kctls[0] = kctl; break;
	case MIXER_MIC_C_S:
		kctls[1] = kctl; break;
	default:
		break;
	}

	return 0;
}
669
/* Build every ALSA control for this mixer and program the initial
 * mute/switch routing.  Returns 0 or the first registration error. */
static int ct_mixer_kcontrols_create(struct ct_mixer *mixer)
{
	enum CTALSA_MIXER_CTL type;
	struct ct_atc *atc = mixer->atc;
	int err;

	/* Create snd kcontrol instances on demand */
	/* Volume controls: vol_ctl is a file-scope template (declared
	 * outside this chunk) patched with each table entry's name/id. */
	for (type = VOL_MIXER_START; type <= VOL_MIXER_END; type++) {
		if (ct_kcontrol_init_table[type].ctl) {
			vol_ctl.name = ct_kcontrol_init_table[type].name;
			vol_ctl.private_value = (unsigned long)type;
			err = ct_mixer_kcontrol_new(mixer, &vol_ctl);
			if (err)
				return err;
		}
	}

	/* Expose the digital-IO switch only when the chip supports it. */
	ct_kcontrol_init_table[MIXER_DIGITAL_IO_S].ctl =
					atc->have_digit_io_switch(atc);
	/* Switch controls, same template pattern with swh_ctl. */
	for (type = SWH_MIXER_START; type <= SWH_MIXER_END; type++) {
		if (ct_kcontrol_init_table[type].ctl) {
			swh_ctl.name = ct_kcontrol_init_table[type].name;
			swh_ctl.private_value = (unsigned long)type;
			err = ct_mixer_kcontrol_new(mixer, &swh_ctl);
			if (err)
				return err;
		}
	}

	/* IEC958 (S/PDIF) controls: mask, default and per-stream. */
	err = ct_mixer_kcontrol_new(mixer, &iec958_mask_ctl);
	if (err)
		return err;

	err = ct_mixer_kcontrol_new(mixer, &iec958_default_ctl);
	if (err)
		return err;

	err = ct_mixer_kcontrol_new(mixer, &iec958_ctl);
	if (err)
		return err;

	/* Initial routing: front playback unmuted, every other output and
	 * input muted; switch bitmap kept in sync with the hardware. */
	atc->line_front_unmute(atc, 1);
	set_switch_state(mixer, MIXER_WAVEF_P_S, 1);
	atc->line_surround_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVES_P_S, 0);
	atc->line_clfe_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVEC_P_S, 0);
	atc->line_rear_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVER_P_S, 0);
	atc->spdif_out_unmute(atc, 0);
	set_switch_state(mixer, MIXER_SPDIFO_P_S, 0);
	atc->line_in_unmute(atc, 0);
	set_switch_state(mixer, MIXER_LINEIN_P_S, 0);
	atc->spdif_in_unmute(atc, 0);
	set_switch_state(mixer, MIXER_SPDIFI_P_S, 0);

	/* All capture sources start enabled. */
	set_switch_state(mixer, MIXER_PCM_C_S, 1);
	set_switch_state(mixer, MIXER_LINEIN_C_S, 1);
	set_switch_state(mixer, MIXER_SPDIFI_C_S, 1);

	return 0;
}
732
733static void
734ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type)
735{
736 struct amixer *amix_d;
737 struct sum *sum_c;
738 int i;
739
740 for (i = 0; i < 2; i++) {
741 amix_d = mixer->amixers[type*CHN_NUM+i];
742 sum_c = mixer->sums[SUM_IN_F_C*CHN_NUM+i];
743 amix_d->ops->set_sum(amix_d, sum_c);
744 amix_d->ops->commit_write(amix_d);
745 }
746}
747
748static void
749ct_mixer_recording_unselect(struct ct_mixer *mixer, enum CT_AMIXER_CTL type)
750{
751 struct amixer *amix_d;
752 int i;
753
754 for (i = 0; i < 2; i++) {
755 amix_d = mixer->amixers[type*CHN_NUM+i];
756 amix_d->ops->set_sum(amix_d, NULL);
757 amix_d->ops->commit_write(amix_d);
758 }
759}
760
/* Acquire all sum and amixer chip resources for the mixer.  On any
 * failure every resource obtained so far is returned to its manager.
 * Assumes mixer->sums/amixers were zero-filled (ct_mixer_get_mem()),
 * so the rollback loops can skip never-filled slots via NULL checks. */
static int ct_mixer_get_resources(struct ct_mixer *mixer)
{
	struct sum_mgr *sum_mgr;
	struct sum *sum;
	struct sum_desc sum_desc = {0};
	struct amixer_mgr *amixer_mgr;
	struct amixer *amixer;
	struct amixer_desc am_desc = {0};
	int err;	/* only assigned inside the loops below; relies on
			 * NUM_CT_SUMS/NUM_CT_AMIXERS being non-zero */
	int i;

	/* Allocate sum resources for mixer obj */
	sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM];
	sum_desc.msr = mixer->atc->msr;
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		err = sum_mgr->get_sum(sum_mgr, &sum_desc, &sum);
		if (err) {
			printk(KERN_ERR "ctxfi:Failed to get sum resources for "
					  "front output!\n");
			break;
		}
		mixer->sums[i] = sum;
	}
	if (err)
		goto error1;

	/* Allocate amixer resources for mixer obj */
	amixer_mgr = (struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER];
	am_desc.msr = mixer->atc->msr;
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		err = amixer_mgr->get_amixer(amixer_mgr, &am_desc, &amixer);
		if (err) {
			printk(KERN_ERR "ctxfi:Failed to get amixer resources "
			       "for mixer obj!\n");
			break;
		}
		mixer->amixers[i] = amixer;
	}
	if (err)
		goto error2;

	return 0;

error2:
	/* Return any amixers acquired before the failure. */
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		if (NULL != mixer->amixers[i]) {
			amixer = mixer->amixers[i];
			amixer_mgr->put_amixer(amixer_mgr, amixer);
			mixer->amixers[i] = NULL;
		}
	}
error1:
	/* Return any sums acquired before the failure. */
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		if (NULL != mixer->sums[i]) {
			sum_mgr->put_sum(sum_mgr, (struct sum *)mixer->sums[i]);
			mixer->sums[i] = NULL;
		}
	}

	return err;
}
822
823static int ct_mixer_get_mem(struct ct_mixer **rmixer)
824{
825 struct ct_mixer *mixer;
826 int err;
827
828 *rmixer = NULL;
829 /* Allocate mem for mixer obj */
830 mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
831 if (NULL == mixer)
832 return -ENOMEM;
833
834 mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM),
835 GFP_KERNEL);
836 if (NULL == mixer->amixers) {
837 err = -ENOMEM;
838 goto error1;
839 }
840 mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL);
841 if (NULL == mixer->sums) {
842 err = -ENOMEM;
843 goto error2;
844 }
845
846 *rmixer = mixer;
847 return 0;
848
849error2:
850 kfree(mixer->amixers);
851error1:
852 kfree(mixer);
853 return err;
854}
855
856static int ct_mixer_topology_build(struct ct_mixer *mixer)
857{
858 struct sum *sum;
859 struct amixer *amix_d, *amix_s;
860 enum CT_AMIXER_CTL i, j;
861
862 /* Build topology from destination to source */
863
864 /* Set up Master mixer */
865 for (i = AMIXER_MASTER_F, j = SUM_IN_F;
866 i <= AMIXER_MASTER_S; i++, j++) {
867 amix_d = mixer->amixers[i*CHN_NUM];
868 sum = mixer->sums[j*CHN_NUM];
869 amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
870 amix_d = mixer->amixers[i*CHN_NUM+1];
871 sum = mixer->sums[j*CHN_NUM+1];
872 amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
873 }
874
875 /* Set up Wave-out mixer */
876 for (i = AMIXER_WAVE_F, j = AMIXER_MASTER_F;
877 i <= AMIXER_WAVE_S; i++, j++) {
878 amix_d = mixer->amixers[i*CHN_NUM];
879 amix_s = mixer->amixers[j*CHN_NUM];
880 amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
881 amix_d = mixer->amixers[i*CHN_NUM+1];
882 amix_s = mixer->amixers[j*CHN_NUM+1];
883 amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
884 }
885
886 /* Set up S/PDIF-out mixer */
887 amix_d = mixer->amixers[AMIXER_SPDIFO*CHN_NUM];
888 amix_s = mixer->amixers[AMIXER_MASTER_F*CHN_NUM];
889 amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
890 amix_d = mixer->amixers[AMIXER_SPDIFO*CHN_NUM+1];
891 amix_s = mixer->amixers[AMIXER_MASTER_F*CHN_NUM+1];
892 amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
893
894 /* Set up PCM-in mixer */
895 for (i = AMIXER_PCM_F, j = SUM_IN_F; i <= AMIXER_PCM_S; i++, j++) {
896 amix_d = mixer->amixers[i*CHN_NUM];
897 sum = mixer->sums[j*CHN_NUM];
898 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
899 amix_d = mixer->amixers[i*CHN_NUM+1];
900 sum = mixer->sums[j*CHN_NUM+1];
901 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
902 }
903
904 /* Set up Line-in mixer */
905 amix_d = mixer->amixers[AMIXER_LINEIN*CHN_NUM];
906 sum = mixer->sums[SUM_IN_F*CHN_NUM];
907 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
908 amix_d = mixer->amixers[AMIXER_LINEIN*CHN_NUM+1];
909 sum = mixer->sums[SUM_IN_F*CHN_NUM+1];
910 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
911
912 /* Set up Mic-in mixer */
913 amix_d = mixer->amixers[AMIXER_MIC*CHN_NUM];
914 sum = mixer->sums[SUM_IN_F*CHN_NUM];
915 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
916 amix_d = mixer->amixers[AMIXER_MIC*CHN_NUM+1];
917 sum = mixer->sums[SUM_IN_F*CHN_NUM+1];
918 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
919
920 /* Set up S/PDIF-in mixer */
921 amix_d = mixer->amixers[AMIXER_SPDIFI*CHN_NUM];
922 sum = mixer->sums[SUM_IN_F*CHN_NUM];
923 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
924 amix_d = mixer->amixers[AMIXER_SPDIFI*CHN_NUM+1];
925 sum = mixer->sums[SUM_IN_F*CHN_NUM+1];
926 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
927
928 /* Set up Master recording mixer */
929 amix_d = mixer->amixers[AMIXER_MASTER_F_C*CHN_NUM];
930 sum = mixer->sums[SUM_IN_F_C*CHN_NUM];
931 amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
932 amix_d = mixer->amixers[AMIXER_MASTER_F_C*CHN_NUM+1];
933 sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1];
934 amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
935
936 /* Set up PCM-in recording mixer */
937 amix_d = mixer->amixers[AMIXER_PCM_F_C*CHN_NUM];
938 sum = mixer->sums[SUM_IN_F_C*CHN_NUM];
939 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
940 amix_d = mixer->amixers[AMIXER_PCM_F_C*CHN_NUM+1];
941 sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1];
942 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
943
944 /* Set up Line-in recording mixer */
945 amix_d = mixer->amixers[AMIXER_LINEIN_C*CHN_NUM];
946 sum = mixer->sums[SUM_IN_F_C*CHN_NUM];
947 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
948 amix_d = mixer->amixers[AMIXER_LINEIN_C*CHN_NUM+1];
949 sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1];
950 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
951
952 /* Set up Mic-in recording mixer */
953 amix_d = mixer->amixers[AMIXER_MIC_C*CHN_NUM];
954 sum = mixer->sums[SUM_IN_F_C*CHN_NUM];
955 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
956 amix_d = mixer->amixers[AMIXER_MIC_C*CHN_NUM+1];
957 sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1];
958 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
959
960 /* Set up S/PDIF-in recording mixer */
961 amix_d = mixer->amixers[AMIXER_SPDIFI_C*CHN_NUM];
962 sum = mixer->sums[SUM_IN_F_C*CHN_NUM];
963 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
964 amix_d = mixer->amixers[AMIXER_SPDIFI_C*CHN_NUM+1];
965 sum = mixer->sums[SUM_IN_F_C*CHN_NUM+1];
966 amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
967
968 return 0;
969}
970
971static int mixer_set_input_port(struct amixer *amixer, struct rsc *rsc)
972{
973 amixer->ops->set_input(amixer, rsc);
974 amixer->ops->commit_write(amixer);
975
976 return 0;
977}
978
/* Map a public mixer-port id onto the internal amixer index.
 * NOTE(review): the default case returns 0, which aliases the first
 * CT_AMIXER_CTL value rather than signalling "invalid" — callers are
 * presumably only invoked with valid ports; confirm. */
static enum CT_AMIXER_CTL port_to_amixer(enum MIXER_PORT_T type)
{
	switch (type) {
	case MIX_WAVE_FRONT:	return AMIXER_WAVE_F;
	case MIX_WAVE_SURROUND:	return AMIXER_WAVE_S;
	case MIX_WAVE_CENTLFE:	return AMIXER_WAVE_C;
	case MIX_WAVE_REAR:	return AMIXER_WAVE_R;
	case MIX_PCMO_FRONT:	return AMIXER_MASTER_F_C;
	case MIX_SPDIF_OUT:	return AMIXER_SPDIFO;
	case MIX_LINE_IN:	return AMIXER_LINEIN;
	case MIX_MIC_IN:	return AMIXER_MIC;
	case MIX_SPDIF_IN:	return AMIXER_SPDIFI;
	case MIX_PCMI_FRONT:	return AMIXER_PCM_F;
	case MIX_PCMI_SURROUND:	return AMIXER_PCM_S;
	case MIX_PCMI_CENTLFE:	return AMIXER_PCM_C;
	case MIX_PCMI_REAR:	return AMIXER_PCM_R;
	default:		return 0;
	}
}
998
999static int mixer_get_output_ports(struct ct_mixer *mixer,
1000 enum MIXER_PORT_T type,
1001 struct rsc **rleft, struct rsc **rright)
1002{
1003 enum CT_AMIXER_CTL amix = port_to_amixer(type);
1004
1005 if (NULL != rleft)
1006 *rleft = &((struct amixer *)mixer->amixers[amix*CHN_NUM])->rsc;
1007
1008 if (NULL != rright)
1009 *rright =
1010 &((struct amixer *)mixer->amixers[amix*CHN_NUM+1])->rsc;
1011
1012 return 0;
1013}
1014
/* Feed @rsc into the left channel of port @type; when the port has a
 * recording companion (get_recording_amixer(), defined outside this
 * chunk, returns a valid index), mirror it into the recording path. */
static int mixer_set_input_left(struct ct_mixer *mixer,
				enum MIXER_PORT_T type, struct rsc *rsc)
{
	enum CT_AMIXER_CTL amix = port_to_amixer(type);

	mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc);
	amix = get_recording_amixer(amix);
	if (amix < NUM_CT_AMIXERS)
		mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc);

	return 0;
}
1027
/* Right-channel counterpart of mixer_set_input_left(): feed @rsc into
 * the right channel (index +1) and mirror into the recording path. */
static int
mixer_set_input_right(struct ct_mixer *mixer,
		      enum MIXER_PORT_T type, struct rsc *rsc)
{
	enum CT_AMIXER_CTL amix = port_to_amixer(type);

	mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc);
	amix = get_recording_amixer(amix);
	if (amix < NUM_CT_AMIXERS)
		mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc);

	return 0;
}
1041
/* Tear down the mixer: return every chip resource to its manager and
 * free the object.  Safe for partially-populated arrays (NULL slots
 * are skipped), so it doubles as the rollback path of ct_mixer_create().
 * Always returns 0. */
int ct_mixer_destroy(struct ct_mixer *mixer)
{
	struct sum_mgr *sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM];
	struct amixer_mgr *amixer_mgr =
			(struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER];
	struct amixer *amixer;
	int i = 0;

	/* Release amixer resources */
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		if (NULL != mixer->amixers[i]) {
			amixer = mixer->amixers[i];
			amixer_mgr->put_amixer(amixer_mgr, amixer);
		}
	}

	/* Release sum resources */
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		if (NULL != mixer->sums[i])
			sum_mgr->put_sum(sum_mgr, (struct sum *)mixer->sums[i]);
	}

	/* Release mem assigned to mixer object */
	kfree(mixer->sums);
	kfree(mixer->amixers);
	kfree(mixer);

	return 0;
}
1071
/* Create the mixer object for @atc: allocate memory, acquire chip
 * resources and wire the internal topology.  On success *rmixer holds
 * the new mixer; on failure everything is rolled back via
 * ct_mixer_destroy() and a negative error code is returned. */
int ct_mixer_create(struct ct_atc *atc, struct ct_mixer **rmixer)
{
	struct ct_mixer *mixer;
	int err;

	*rmixer = NULL;

	/* Allocate mem for mixer obj */
	err = ct_mixer_get_mem(&mixer);
	if (err)
		return err;

	mixer->switch_state = 0;
	mixer->atc = atc;
	/* Set operations */
	mixer->get_output_ports = mixer_get_output_ports;
	mixer->set_input_left = mixer_set_input_left;
	mixer->set_input_right = mixer_set_input_right;

	/* Allocate chip resources for mixer obj */
	err = ct_mixer_get_resources(mixer);
	if (err)
		goto error;

	/* Build internal mixer topology */
	ct_mixer_topology_build(mixer);

	*rmixer = mixer;

	return 0;

error:
	/* ct_mixer_destroy() tolerates the partially-initialized state:
	 * the resource arrays exist (get_mem succeeded) and are zeroed. */
	ct_mixer_destroy(mixer);
	return err;
}
1107
1108int ct_alsa_mix_create(struct ct_atc *atc,
1109 enum CTALSADEVS device,
1110 const char *device_name)
1111{
1112 int err;
1113
1114 /* Create snd kcontrol instances on demand */
1115 /* vol_ctl.device = swh_ctl.device = device; */ /* better w/ device 0 */
1116 err = ct_mixer_kcontrols_create((struct ct_mixer *)atc->mixer);
1117 if (err)
1118 return err;
1119
1120 strcpy(atc->card->mixername, device_name);
1121
1122 return 0;
1123}
diff --git a/sound/pci/ctxfi/ctmixer.h b/sound/pci/ctxfi/ctmixer.h
new file mode 100644
index 000000000000..e2d96ebde746
--- /dev/null
+++ b/sound/pci/ctxfi/ctmixer.h
@@ -0,0 +1,67 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctmixer.h
9 *
10 * @Brief
11 * This file contains the definition of the mixer device functions.
12 *
13 * @Author Liu Chun
14 * @Date Mar 28 2008
15 *
16 */
17
#ifndef CTMIXER_H
#define CTMIXER_H

#include "ctatc.h"
#include "ctresource.h"

/* Initial amixer volume applied when the mixer topology is built. */
#define INIT_VOL 0x1c00

/* Public ids of the routable mixer ports (playback outputs and
 * capture inputs). */
enum MIXER_PORT_T {
	MIX_WAVE_FRONT,
	MIX_WAVE_REAR,
	MIX_WAVE_CENTLFE,
	MIX_WAVE_SURROUND,
	MIX_SPDIF_OUT,
	MIX_PCMO_FRONT,
	MIX_MIC_IN,
	MIX_LINE_IN,
	MIX_SPDIF_IN,
	MIX_PCMI_FRONT,
	MIX_PCMI_REAR,
	MIX_PCMI_CENTLFE,
	MIX_PCMI_SURROUND,

	NUM_MIX_PORTS	/* keep last: number of mixer ports */
};

/* alsa mixer descriptor */
struct ct_mixer {
	struct ct_atc *atc;	/* owning card driver instance */

	void **amixers; /* amixer resources for volume control */
	void **sums; /* sum resources for signal collection */
	unsigned int switch_state; /* A bit-map to indicate state of switches */

	/* Fetch the left/right resources of an output port; either
	 * out-pointer may be NULL when that channel is not wanted. */
	int (*get_output_ports)(struct ct_mixer *mixer, enum MIXER_PORT_T type,
				  struct rsc **rleft, struct rsc **rright);

	/* Route a resource into the left/right channel of a port. */
	int (*set_input_left)(struct ct_mixer *mixer,
			      enum MIXER_PORT_T type, struct rsc *rsc);
	int (*set_input_right)(struct ct_mixer *mixer,
			       enum MIXER_PORT_T type, struct rsc *rsc);
};

int ct_alsa_mix_create(struct ct_atc *atc,
		       enum CTALSADEVS device,
		       const char *device_name);
int ct_mixer_create(struct ct_atc *atc, struct ct_mixer **rmixer);
int ct_mixer_destroy(struct ct_mixer *mixer);

#endif /* CTMIXER_H */
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
new file mode 100644
index 000000000000..9e5c0c4da726
--- /dev/null
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -0,0 +1,426 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctpcm.c
9 *
10 * @Brief
11 * This file contains the definition of the pcm device functions.
12 *
13 * @Author Liu Chun
14 * @Date Apr 2 2008
15 *
16 */
17
18#include "ctpcm.h"
19#include "cttimer.h"
20#include <sound/pcm.h>
21
22/* Hardware descriptions for playback */
/* Hardware descriptions for playback */
/* Normal playback: up to 192 kHz; channels_max is raised to 8 for the
 * FRONT device in ct_pcm_playback_open(). */
static struct snd_pcm_hardware ct_pcm_playback_hw = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE),
	.formats = (SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S16_LE |
		    SNDRV_PCM_FMTBIT_S24_3LE |
		    SNDRV_PCM_FMTBIT_S32_LE |
		    SNDRV_PCM_FMTBIT_FLOAT_LE),
	.rates = (SNDRV_PCM_RATE_CONTINUOUS |
		  SNDRV_PCM_RATE_8000_192000),
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = (128*1024),
	.period_bytes_min = (64),
	.period_bytes_max = (128*1024),
	.periods_min = 2,
	.periods_max = 1024,
	.fifo_size = 0,
};
47
/* S/PDIF passthrough: restricted to stereo 16-bit at the consumer
 * IEC958 rates (32/44.1/48 kHz). */
static struct snd_pcm_hardware ct_spdif_passthru_playback_hw = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE),
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = (SNDRV_PCM_RATE_48000 |
		  SNDRV_PCM_RATE_44100 |
		  SNDRV_PCM_RATE_32000),
	.rate_min = 32000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = (128*1024),
	.period_bytes_min = (64),
	.period_bytes_max = (128*1024),
	.periods_min = 2,
	.periods_max = 1024,
	.fifo_size = 0,
};
69
70/* Hardware descriptions for capture */
/* Hardware descriptions for capture */
/* Capture: up to 96 kHz here; rate_max is recomputed per-chip in
 * ct_pcm_capture_open() as atc->rsr * atc->msr. */
static struct snd_pcm_hardware ct_pcm_capture_hw = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_MMAP_VALID),
	.formats = (SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S16_LE |
		    SNDRV_PCM_FMTBIT_S24_3LE |
		    SNDRV_PCM_FMTBIT_S32_LE |
		    SNDRV_PCM_FMTBIT_FLOAT_LE),
	.rates = (SNDRV_PCM_RATE_CONTINUOUS |
		  SNDRV_PCM_RATE_8000_96000),
	.rate_min = 8000,
	.rate_max = 96000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = (128*1024),
	.period_bytes_min = (384),
	.period_bytes_max = (64*1024),
	.periods_min = 2,
	.periods_max = 1024,
	.fifo_size = 0,
};
95
96static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm)
97{
98 struct ct_atc_pcm *apcm = atc_pcm;
99
100 if (NULL == apcm->substream)
101 return;
102
103 snd_pcm_period_elapsed(apcm->substream);
104}
105
/* runtime->private_free hook: release chip resources and the timer
 * instance before freeing the per-substream object.  Order matters:
 * apcm fields are read before apcm itself is freed. */
static void ct_atc_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	struct ct_atc_pcm *apcm = runtime->private_data;
	struct ct_atc *atc = snd_pcm_substream_chip(apcm->substream);

	atc->pcm_release_resources(atc, apcm);
	ct_timer_instance_free(apcm->timer);
	kfree(apcm);
	runtime->private_data = NULL;
}
116
117/* pcm playback operations */
/* pcm playback operations */
/* Open a playback substream: allocate the per-substream state, select
 * the hardware description (S/PDIF passthrough vs normal) and install
 * the period constraints. */
static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm;
	int err;

	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (NULL == apcm)
		return -ENOMEM;

	apcm->substream = substream;
	apcm->interrupt = ct_atc_pcm_interrupt;
	/* private_free owns apcm from here on (frees it at stream close). */
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;
	if (IEC958 == substream->pcm->device) {
		runtime->hw = ct_spdif_passthru_playback_hw;
		atc->spdif_out_passthru(atc, 1);
	} else {
		runtime->hw = ct_pcm_playback_hw;
		/* The FRONT device supports multichannel (7.1) playback. */
		if (FRONT == substream->pcm->device)
			runtime->hw.channels_max = 8;
	}

	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		kfree(apcm);
		return err;
	}
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   1024, UINT_MAX);
	if (err < 0) {
		kfree(apcm);
		return err;
	}

	/* NOTE(review): error cleanup is asymmetric — the constraint
	 * failures kfree(apcm) directly while a timer-allocation failure
	 * does not; whether the core invokes runtime->private_free on a
	 * failed open determines which of these leaks or double-frees.
	 * Confirm against snd_pcm open/release semantics. */
	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer)
		return -ENOMEM;

	return 0;
}
162
163static int ct_pcm_playback_close(struct snd_pcm_substream *substream)
164{
165 struct ct_atc *atc = snd_pcm_substream_chip(substream);
166
167 /* TODO: Notify mixer inactive. */
168 if (IEC958 == substream->pcm->device)
169 atc->spdif_out_passthru(atc, 0);
170
171 /* The ct_atc_pcm object will be freed by runtime->private_free */
172
173 return 0;
174}
175
176static int ct_pcm_hw_params(struct snd_pcm_substream *substream,
177 struct snd_pcm_hw_params *hw_params)
178{
179 struct ct_atc *atc = snd_pcm_substream_chip(substream);
180 struct ct_atc_pcm *apcm = substream->runtime->private_data;
181 int err;
182
183 err = snd_pcm_lib_malloc_pages(substream,
184 params_buffer_bytes(hw_params));
185 if (err < 0)
186 return err;
187 /* clear previous resources */
188 atc->pcm_release_resources(atc, apcm);
189 return err;
190}
191
192static int ct_pcm_hw_free(struct snd_pcm_substream *substream)
193{
194 struct ct_atc *atc = snd_pcm_substream_chip(substream);
195 struct ct_atc_pcm *apcm = substream->runtime->private_data;
196
197 /* clear previous resources */
198 atc->pcm_release_resources(atc, apcm);
199 /* Free snd-allocated pages */
200 return snd_pcm_lib_free_pages(substream);
201}
202
203
204static int ct_pcm_playback_prepare(struct snd_pcm_substream *substream)
205{
206 int err;
207 struct ct_atc *atc = snd_pcm_substream_chip(substream);
208 struct snd_pcm_runtime *runtime = substream->runtime;
209 struct ct_atc_pcm *apcm = runtime->private_data;
210
211 if (IEC958 == substream->pcm->device)
212 err = atc->spdif_passthru_playback_prepare(atc, apcm);
213 else
214 err = atc->pcm_playback_prepare(atc, apcm);
215
216 if (err < 0) {
217 printk(KERN_ERR "ctxfi: Preparing pcm playback failed!!!\n");
218 return err;
219 }
220
221 return 0;
222}
223
/* Start/stop playback.  Resume and pause-release are treated like
 * START; suspend and pause-push like STOP.  Unknown commands are
 * ignored.  Always returns 0. */
static int
ct_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		atc->pcm_playback_start(atc, apcm);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		atc->pcm_playback_stop(atc, apcm);
		break;
	default:
		break;
	}

	return 0;
}
248
249static snd_pcm_uframes_t
250ct_pcm_playback_pointer(struct snd_pcm_substream *substream)
251{
252 unsigned long position;
253 struct ct_atc *atc = snd_pcm_substream_chip(substream);
254 struct snd_pcm_runtime *runtime = substream->runtime;
255 struct ct_atc_pcm *apcm = runtime->private_data;
256
257 /* Read out playback position */
258 position = atc->pcm_playback_position(atc, apcm);
259 position = bytes_to_frames(runtime, position);
260 if (position >= runtime->buffer_size)
261 position = 0;
262 return position;
263}
264
265/* pcm capture operations */
/* pcm capture operations */
/* Open a capture substream: allocate the per-substream state, install
 * the capture hardware description (rate_max scaled by the chip's
 * sample-rate settings) and the period constraints. */
static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
{
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm;
	int err;

	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (NULL == apcm)
		return -ENOMEM;

	apcm->started = 0;
	apcm->substream = substream;
	apcm->interrupt = ct_atc_pcm_interrupt;
	/* private_free owns apcm from here on (frees it at stream close). */
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;
	runtime->hw = ct_pcm_capture_hw;
	/* Actual max rate depends on the chip's reference/master rates. */
	runtime->hw.rate_max = atc->rsr * atc->msr;

	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		kfree(apcm);
		return err;
	}
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   1024, UINT_MAX);
	if (err < 0) {
		kfree(apcm);
		return err;
	}

	/* NOTE(review): same cleanup asymmetry as ct_pcm_playback_open() —
	 * constraint failures kfree(apcm), timer failure does not; confirm
	 * against runtime->private_free semantics on failed open. */
	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer)
		return -ENOMEM;

	return 0;
}
305
/* Close a capture substream; all real teardown happens in
 * runtime->private_free (ct_atc_pcm_free_substream). */
static int ct_pcm_capture_close(struct snd_pcm_substream *substream)
{
	/* The ct_atc_pcm object will be freed by runtime->private_free */
	/* TODO: Notify mixer inactive. */
	return 0;
}
312
313static int ct_pcm_capture_prepare(struct snd_pcm_substream *substream)
314{
315 int err;
316 struct ct_atc *atc = snd_pcm_substream_chip(substream);
317 struct snd_pcm_runtime *runtime = substream->runtime;
318 struct ct_atc_pcm *apcm = runtime->private_data;
319
320 err = atc->pcm_capture_prepare(atc, apcm);
321 if (err < 0) {
322 printk(KERN_ERR "ctxfi: Preparing pcm capture failed!!!\n");
323 return err;
324 }
325
326 return 0;
327}
328
329static int
330ct_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd)
331{
332 struct ct_atc *atc = snd_pcm_substream_chip(substream);
333 struct snd_pcm_runtime *runtime = substream->runtime;
334 struct ct_atc_pcm *apcm = runtime->private_data;
335
336 switch (cmd) {
337 case SNDRV_PCM_TRIGGER_START:
338 atc->pcm_capture_start(atc, apcm);
339 break;
340 case SNDRV_PCM_TRIGGER_STOP:
341 atc->pcm_capture_stop(atc, apcm);
342 break;
343 default:
344 atc->pcm_capture_stop(atc, apcm);
345 break;
346 }
347
348 return 0;
349}
350
/* Report the current capture position in frames for ALSA. */
static snd_pcm_uframes_t
ct_pcm_capture_pointer(struct snd_pcm_substream *substream)
{
	unsigned long position;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = runtime->private_data;

	/* Read out capture position (bytes) and convert to frames;
	 * out-of-range readings are clamped to 0. */
	position = atc->pcm_capture_position(atc, apcm);
	position = bytes_to_frames(runtime, position);
	if (position >= runtime->buffer_size)
		position = 0;
	return position;
}
366
367/* PCM operators for playback */
/* PCM operators for playback */
/* .page uses the SG-buffer helper to match the SNDRV_DMA_TYPE_DEV_SG
 * preallocation in ct_alsa_pcm_create(). */
static struct snd_pcm_ops ct_pcm_playback_ops = {
	.open = ct_pcm_playback_open,
	.close = ct_pcm_playback_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = ct_pcm_hw_params,
	.hw_free = ct_pcm_hw_free,
	.prepare = ct_pcm_playback_prepare,
	.trigger = ct_pcm_playback_trigger,
	.pointer = ct_pcm_playback_pointer,
	.page = snd_pcm_sgbuf_ops_page,
};
379
380/* PCM operators for capture */
/* PCM operators for capture */
/* hw_params/hw_free are shared with the playback direction. */
static struct snd_pcm_ops ct_pcm_capture_ops = {
	.open = ct_pcm_capture_open,
	.close = ct_pcm_capture_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = ct_pcm_hw_params,
	.hw_free = ct_pcm_hw_free,
	.prepare = ct_pcm_capture_prepare,
	.trigger = ct_pcm_capture_trigger,
	.pointer = ct_pcm_capture_pointer,
	.page = snd_pcm_sgbuf_ops_page,
};
392
393/* Create ALSA pcm device */
/* Create ALSA pcm device */
/* Register one ALSA PCM device on the card: the IEC958 device gets a
 * single playback substream, the FRONT device additionally gets one
 * capture substream, all others get 8 playback substreams.  Returns 0
 * or the snd_pcm_new() error. */
int ct_alsa_pcm_create(struct ct_atc *atc,
		       enum CTALSADEVS device,
		       const char *device_name)
{
	struct snd_pcm *pcm;
	int err;
	int playback_count, capture_count;

	playback_count = (IEC958 == device) ? 1 : 8;
	capture_count = (FRONT == device) ? 1 : 0;
	err = snd_pcm_new(atc->card, "ctxfi", device,
			  playback_count, capture_count, &pcm);
	if (err < 0) {
		printk(KERN_ERR "ctxfi: snd_pcm_new failed!! Err=%d\n", err);
		return err;
	}

	pcm->private_data = atc;
	pcm->info_flags = 0;
	pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
	strlcpy(pcm->name, device_name, sizeof(pcm->name));

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ct_pcm_playback_ops);

	if (FRONT == device)
		snd_pcm_set_ops(pcm,
				SNDRV_PCM_STREAM_CAPTURE, &ct_pcm_capture_ops);

	/* Preallocate scatter-gather DMA buffers for every substream. */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
			snd_dma_pci_data(atc->pci), 128*1024, 128*1024);

	return 0;
}
diff --git a/sound/pci/ctxfi/ctpcm.h b/sound/pci/ctxfi/ctpcm.h
new file mode 100644
index 000000000000..178da0dca647
--- /dev/null
+++ b/sound/pci/ctxfi/ctpcm.h
@@ -0,0 +1,27 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctpcm.h
9 *
10 * @Brief
11 * This file contains the definition of the pcm device functions.
12 *
13 * @Author Liu Chun
14 * @Date Mar 28 2008
15 *
16 */
17
#ifndef CTPCM_H
#define CTPCM_H

#include "ctatc.h"

/* Register the ALSA PCM device @device (named @device_name) for @atc.
 * Returns 0 on success or a negative error code from snd_pcm_new(). */
int ct_alsa_pcm_create(struct ct_atc *atc,
		       enum CTALSADEVS device,
		       const char *device_name);

#endif /* CTPCM_H */
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
new file mode 100644
index 000000000000..889c495bb7d1
--- /dev/null
+++ b/sound/pci/ctxfi/ctresource.c
@@ -0,0 +1,301 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctresource.c
9 *
10 * @Brief
11 * This file contains the implementation of some generic helper functions.
12 *
13 * @Author Liu Chun
14 * @Date May 15 2008
15 *
16 */
17
18#include "ctresource.h"
19#include "cthardware.h"
20#include <linux/err.h>
21#include <linux/slab.h>
22
23#define AUDIO_SLOT_BLOCK_NUM 256
24
/*
 * Resource allocation based on a bit-map: scan @rscs (holding @amount
 * bits in total) for a run of @multi consecutive clear bits, mark the
 * run as used and store the index of its first bit in @ridx.
 * Returns 0 on success, -ENOENT when no long-enough free run exists.
 */
static int
get_resource(u8 *rscs, unsigned int amount,
	     unsigned int multi, unsigned int *ridx)
{
	int i, run, left;

	/* Scan for @multi consecutive clear bits. */
	left = multi;
	for (i = 0; i < amount; i++) {
		if (rscs[i / 8] & ((u8)1 << (i % 8))) {
			/* Bit in use: restart the run-length counter. */
			left = multi;
			continue;
		}
		if (--left == 0)
			break;	/* free run of @multi bits ends at @i */
	}

	if (i >= amount) {
		/* Can not find sufficient contiguous resources */
		return -ENOENT;
	}

	/* Mark the run, walking backwards from its last bit @i. */
	for (run = multi; run > 0; run--, i--)
		rscs[i / 8] |= ((u8)1 << (i % 8));

	*ridx = i + 1;	/* first bit of the run */

	return 0;
}
61
/* Clear @multi consecutive bits of the allocation bit-map @rscs,
 * starting at bit @idx, returning the resources to the free pool. */
static int put_resource(u8 *rscs, unsigned int multi, unsigned int idx)
{
	unsigned int bit;

	for (bit = idx; bit < idx + multi; bit++)
		rscs[bit / 8] &= (u8)~(1u << (bit % 8));

	return 0;
}
76
77int mgr_get_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int *ridx)
78{
79 int err;
80
81 if (n > mgr->avail)
82 return -ENOENT;
83
84 err = get_resource(mgr->rscs, mgr->amount, n, ridx);
85 if (!err)
86 mgr->avail -= n;
87
88 return err;
89}
90
/* Return @n previously reserved resources starting at @idx to @mgr. */
int mgr_put_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int idx)
{
	put_resource(mgr->rscs, n, idx);
	mgr->avail += n;

	return 0;
}
98
/* Fixed offset of each resource type's channel within a 16-slot
 * block of the Audio Ring (see audio_ring_slot()). */
static unsigned char offset_in_audio_slot_block[NUM_RSCTYP] = {
	/* SRC channel is at Audio Ring slot 1 every 16 slots. */
	[SRC] = 0x1,
	[AMIXER] = 0x4,
	[SUM] = 0xc,
};
105
/* Current (conjugate) index of a generic resource. */
static int rsc_index(const struct rsc *rsc)
{
	return rsc->conj;
}

/* Audio Ring output slot of a resource: 16 slots per block plus the
 * per-type offset within the block. */
static int audio_ring_slot(const struct rsc *rsc)
{
	return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
}
115
116static int rsc_next_conj(struct rsc *rsc)
117{
118 unsigned int i;
119 for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
120 i++;
121 rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
122 return rsc->conj;
123}
124
/* Rewind the conjugate pointer back to the master index and return it.
 * (Deliberate assignment-in-return.) */
static int rsc_master(struct rsc *rsc)
{
	return rsc->conj = rsc->idx;
}

/* Default operation table shared by all generic resources. */
static struct rsc_ops rsc_generic_ops = {
	.index = rsc_index,
	.output_slot = audio_ring_slot,
	.master = rsc_master,
	.next_conj = rsc_next_conj,
};
136
/*
 * Initialize a generic resource: record its identity and, for types
 * that need one (SRC and AMIXER), fetch a chip-specific control block
 * through the hw ops.  @hw may be NULL, in which case no control
 * block is attached.  Returns 0, -EINVAL on a bad @type, or the hw
 * op's error.
 */
int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw)
{
	int err = 0;

	rsc->idx = idx;
	rsc->conj = idx;	/* conjugate pointer starts at the master */
	rsc->type = type;
	rsc->msr = msr;
	rsc->hw = hw;
	rsc->ops = &rsc_generic_ops;
	if (NULL == hw) {
		rsc->ctrl_blk = NULL;
		return 0;
	}

	switch (type) {
	case SRC:
		err = ((struct hw *)hw)->src_rsc_get_ctrl_blk(&rsc->ctrl_blk);
		break;
	case AMIXER:
		err = ((struct hw *)hw)->
				amixer_rsc_get_ctrl_blk(&rsc->ctrl_blk);
		break;
	case SRCIMP:
	case SUM:
	case DAIO:
		/* These types carry no per-resource control block. */
		break;
	default:
		printk(KERN_ERR
		       "ctxfi: Invalid resource type value %d!\n", type);
		return -EINVAL;
	}

	if (err) {
		printk(KERN_ERR
		       "ctxfi: Failed to get resource control block!\n");
		return err;
	}

	return 0;
}
178
/*
 * Undo rsc_init(): release the chip-specific control block (if any)
 * and reset all identity fields to neutral values.
 */
int rsc_uninit(struct rsc *rsc)
{
	if ((NULL != rsc->hw) && (NULL != rsc->ctrl_blk)) {
		switch (rsc->type) {
		case SRC:
			((struct hw *)rsc->hw)->
				src_rsc_put_ctrl_blk(rsc->ctrl_blk);
			break;
		case AMIXER:
			((struct hw *)rsc->hw)->
				amixer_rsc_put_ctrl_blk(rsc->ctrl_blk);
			break;
		case SUM:
		case DAIO:
			break;
		default:
			printk(KERN_ERR "ctxfi: "
			       "Invalid resource type value %d!\n", rsc->type);
			break;
		}

		rsc->hw = rsc->ctrl_blk = NULL;
	}

	rsc->idx = rsc->conj = 0;
	rsc->type = NUM_RSCTYP;	/* sentinel: "no type" */
	rsc->msr = 0;

	return 0;
}
209
/*
 * Initialize a resource manager: allocate the allocation bit-map
 * (one bit per resource, rounded up to whole bytes) and fetch the
 * chip-specific manager control block for the given type.
 */
int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
		 unsigned int amount, void *hw_obj)
{
	int err = 0;
	struct hw *hw = hw_obj;

	/* Stays at the NUM_RSCTYP sentinel until init fully succeeds. */
	mgr->type = NUM_RSCTYP;

	mgr->rscs = kzalloc(((amount + 8 - 1) / 8), GFP_KERNEL);
	if (NULL == mgr->rscs)
		return -ENOMEM;

	switch (type) {
	case SRC:
		err = hw->src_mgr_get_ctrl_blk(&mgr->ctrl_blk);
		break;
	case SRCIMP:
		err = hw->srcimp_mgr_get_ctrl_blk(&mgr->ctrl_blk);
		break;
	case AMIXER:
		err = hw->amixer_mgr_get_ctrl_blk(&mgr->ctrl_blk);
		break;
	case DAIO:
		err = hw->daio_mgr_get_ctrl_blk(hw, &mgr->ctrl_blk);
		break;
	case SUM:
		/* SUM manager needs no control block. */
		break;
	default:
		printk(KERN_ERR
		       "ctxfi: Invalid resource type value %d!\n", type);
		err = -EINVAL;
		goto error;
	}

	if (err) {
		printk(KERN_ERR
		       "ctxfi: Failed to get manager control block!\n");
		goto error;
	}

	mgr->type = type;
	mgr->avail = mgr->amount = amount;
	mgr->hw = hw;

	return 0;

error:
	kfree(mgr->rscs);
	return err;
}
260
261int rsc_mgr_uninit(struct rsc_mgr *mgr)
262{
263 if (NULL != mgr->rscs) {
264 kfree(mgr->rscs);
265 mgr->rscs = NULL;
266 }
267
268 if ((NULL != mgr->hw) && (NULL != mgr->ctrl_blk)) {
269 switch (mgr->type) {
270 case SRC:
271 ((struct hw *)mgr->hw)->
272 src_mgr_put_ctrl_blk(mgr->ctrl_blk);
273 break;
274 case SRCIMP:
275 ((struct hw *)mgr->hw)->
276 srcimp_mgr_put_ctrl_blk(mgr->ctrl_blk);
277 break;
278 case AMIXER:
279 ((struct hw *)mgr->hw)->
280 amixer_mgr_put_ctrl_blk(mgr->ctrl_blk);
281 break;
282 case DAIO:
283 ((struct hw *)mgr->hw)->
284 daio_mgr_put_ctrl_blk(mgr->ctrl_blk);
285 break;
286 case SUM:
287 break;
288 default:
289 printk(KERN_ERR "ctxfi: "
290 "Invalid resource type value %d!\n", mgr->type);
291 break;
292 }
293
294 mgr->hw = mgr->ctrl_blk = NULL;
295 }
296
297 mgr->type = NUM_RSCTYP;
298 mgr->avail = mgr->amount = 0;
299
300 return 0;
301}
diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
new file mode 100644
index 000000000000..0838c2e84f8b
--- /dev/null
+++ b/sound/pci/ctxfi/ctresource.h
@@ -0,0 +1,72 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctresource.h
9 *
10 * @Brief
11 * This file contains the definition of generic hardware resources for
12 * resource management.
13 *
14 * @Author Liu Chun
15 * @Date May 13 2008
16 *
17 */
18
19#ifndef CTRESOURCE_H
20#define CTRESOURCE_H
21
22#include <linux/types.h>
23
24enum RSCTYP {
25 SRC,
26 SRCIMP,
27 AMIXER,
28 SUM,
29 DAIO,
30 NUM_RSCTYP /* This must be the last one and less than 16 */
31};
32
33struct rsc_ops;
34
35struct rsc {
36 u32 idx:12; /* The index of a resource */
37 u32 type:4; /* The type (RSCTYP) of a resource */
38 u32 conj:12; /* Current conjugate index */
39 u32 msr:4; /* The Master Sample Rate a resource working on */
40 void *ctrl_blk; /* Chip specific control info block for a resource */
41 void *hw; /* Chip specific object for hardware access means */
42 struct rsc_ops *ops; /* Generic resource operations */
43};
44
45struct rsc_ops {
46 int (*master)(struct rsc *rsc); /* Move to master resource */
47 int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
48 int (*index)(const struct rsc *rsc); /* Return the index of resource */
49 /* Return the output slot number */
50 int (*output_slot)(const struct rsc *rsc);
51};
52
53int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw);
54int rsc_uninit(struct rsc *rsc);
55
56struct rsc_mgr {
57 enum RSCTYP type; /* The type (RSCTYP) of resource to manage */
58 unsigned int amount; /* The total amount of a kind of resource */
59 unsigned int avail; /* The amount of currently available resources */
60 unsigned char *rscs; /* The bit-map for resource allocation */
61 void *ctrl_blk; /* Chip specific control info block */
62 void *hw; /* Chip specific object for hardware access */
63};
64
65/* Resource management is based on bit-map mechanism */
66int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
67 unsigned int amount, void *hw);
68int rsc_mgr_uninit(struct rsc_mgr *mgr);
69int mgr_get_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int *ridx);
70int mgr_put_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int idx);
71
72#endif /* CTRESOURCE_H */
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
new file mode 100644
index 000000000000..e1c145d8b702
--- /dev/null
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -0,0 +1,886 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctsrc.c
9 *
10 * @Brief
11 * This file contains the implementation of the Sample Rate Convertor
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 13 2008
16 *
17 */
18
19#include "ctsrc.h"
20#include "cthardware.h"
21#include <linux/slab.h>
22
#define SRC_RESOURCE_NUM 64
#define SRCIMP_RESOURCE_NUM 256

/* Dirty-flag mask used for conjugate-resource programming; written
 * once in src_mgr_create().  NOTE(review): file-scope, so this
 * assumes a single hw instance — confirm. */
static unsigned int conj_mask;

static int src_default_config_memrd(struct src *src);
static int src_default_config_memwr(struct src *src);
static int src_default_config_arcrw(struct src *src);

/* Default-configuration handler, dispatched on the SRC working mode. */
static int (*src_default_config[3])(struct src *) = {
	[MEMRD] = src_default_config_memrd,
	[MEMWR] = src_default_config_memwr,
	[ARCRW] = src_default_config_arcrw
};
37
/*
 * The helpers below each stage one SRC parameter into the resource's
 * chip-specific control block via the hw ops.  Nothing reaches the
 * hardware until src_commit_write() flushes the control block.
 */
static int src_set_state(struct src *src, unsigned int state)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_state(src->rsc.ctrl_blk, state);

	return 0;
}

static int src_set_bm(struct src *src, unsigned int bm)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_bm(src->rsc.ctrl_blk, bm);

	return 0;
}

static int src_set_sf(struct src *src, unsigned int sf)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_sf(src->rsc.ctrl_blk, sf);

	return 0;
}

static int src_set_pm(struct src *src, unsigned int pm)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_pm(src->rsc.ctrl_blk, pm);

	return 0;
}

static int src_set_rom(struct src *src, unsigned int rom)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_rom(src->rsc.ctrl_blk, rom);

	return 0;
}

static int src_set_vo(struct src *src, unsigned int vo)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_vo(src->rsc.ctrl_blk, vo);

	return 0;
}

static int src_set_st(struct src *src, unsigned int st)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_st(src->rsc.ctrl_blk, st);

	return 0;
}

static int src_set_bp(struct src *src, unsigned int bp)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_bp(src->rsc.ctrl_blk, bp);

	return 0;
}

static int src_set_cisz(struct src *src, unsigned int cisz)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_cisz(src->rsc.ctrl_blk, cisz);

	return 0;
}

static int src_set_ca(struct src *src, unsigned int ca)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_ca(src->rsc.ctrl_blk, ca);

	return 0;
}

static int src_set_sa(struct src *src, unsigned int sa)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_sa(src->rsc.ctrl_blk, sa);

	return 0;
}

static int src_set_la(struct src *src, unsigned int la)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_la(src->rsc.ctrl_blk, la);

	return 0;
}

static int src_set_pitch(struct src *src, unsigned int pitch)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_pitch(src->rsc.ctrl_blk, pitch);

	return 0;
}

/* Unlike the setters above, this takes no value: it always requests
 * a clear (argument fixed to 1). */
static int src_set_clear_zbufs(struct src *src)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	return 0;
}
177
/*
 * Flush the staged control-block updates of a SRC to hardware.  At
 * master sample rates above 1 the relevant dirty flags (masked by
 * conj_mask) are replayed onto each conjugate resource as well,
 * except in MEMWR mode which programs only the master.
 */
static int src_commit_write(struct src *src)
{
	struct hw *hw;
	int i;
	unsigned int dirty = 0;

	hw = src->rsc.hw;
	src->rsc.ops->master(&src->rsc);
	if (src->rsc.msr > 1) {
		/* Save dirty flags for conjugate resource programming */
		dirty = hw->src_get_dirty(src->rsc.ctrl_blk) & conj_mask;
	}
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
			     src->rsc.ctrl_blk);

	/* Program conjugate parameter mixer resources */
	if (MEMWR == src->mode)
		return 0;

	for (i = 1; i < src->rsc.msr; i++) {
		src->rsc.ops->next_conj(&src->rsc);
		hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
				     src->rsc.ctrl_blk);
	}
	src->rsc.ops->master(&src->rsc);	/* rewind to the master */

	return 0;
}
207
/* Read back the SRC's current address ("ca") from hardware. */
static int src_get_ca(struct src *src)
{
	struct hw *hw;

	hw = src->rsc.hw;
	return hw->src_get_ca(hw, src->rsc.ops->index(&src->rsc),
			      src->rsc.ctrl_blk);
}

/* Re-apply the default configuration for this SRC's working mode. */
static int src_init(struct src *src)
{
	src_default_config[src->mode](src);

	return 0;
}

/* Next SRC in an interleaved series; NULL terminates the chain. */
static struct src *src_next_interleave(struct src *src)
{
	return src->intlv;
}
228
/*
 * Default configuration for a SRC reading from host memory (MEMRD):
 * 16-bit samples, interleave size from src->multi, and the same
 * start/loop/current addresses and unity pitch on the master and
 * every conjugate resource.
 */
static int src_default_config_memrd(struct src *src)
{
	struct hw *hw = src->rsc.hw;
	unsigned int rsr, msr;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 1);
	/* rsr = log2(msr) */
	for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
		rsr++;

	hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
	hw->src_set_wr(src->rsc.ctrl_blk, 0);	/* read direction */
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, src->multi - 1);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	/* sa/la/ca: start, loop and current addresses — TODO confirm units */
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	src->rsc.ops->master(&src->rsc);
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
			     src->rsc.ctrl_blk);

	/* Conjugate resources only get the pitch programmed. */
	for (msr = 1; msr < src->rsc.msr; msr++) {
		src->rsc.ops->next_conj(&src->rsc);
		hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
				     src->rsc.ctrl_blk);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}
268
/*
 * Default configuration for a SRC writing to host memory (MEMWR):
 * like MEMRD but with the write direction set, no interleaving and
 * rsr fixed to 0.  Only the master resource is programmed.
 */
static int src_default_config_memwr(struct src *src)
{
	struct hw *hw = src->rsc.hw;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 1);
	hw->src_set_rsr(src->rsc.ctrl_blk, 0);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
	hw->src_set_wr(src->rsc.ctrl_blk, 1);	/* write direction */
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	src->rsc.ops->master(&src->rsc);
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
			     src->rsc.ctrl_blk);

	return 0;
}
296
/*
 * Default configuration for a SRC on the audio ring channel (ARCRW):
 * 32-bit float samples, bus-master off, and the full dirty-flag set
 * replayed onto the master and every conjugate resource.
 */
static int src_default_config_arcrw(struct src *src)
{
	struct hw *hw = src->rsc.hw;
	unsigned int rsr, msr;
	unsigned int dirty;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 0);
	/* rsr = log2(msr) */
	for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
		rsr++;

	hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_F32);
	hw->src_set_wr(src->rsc.ctrl_blk, 0);
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	/*hw->src_set_sa(src->rsc.ctrl_blk, 0x100);*/
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	/*hw->src_set_la(src->rsc.ctrl_blk, 0x03ffffe0);*/
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	/* Replay the same dirty set on master and every conjugate. */
	dirty = hw->src_get_dirty(src->rsc.ctrl_blk);
	src->rsc.ops->master(&src->rsc);
	for (msr = 0; msr < src->rsc.msr; msr++) {
		hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
				     src->rsc.ctrl_blk);
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}
337
/* SRC-specific operation table exposed through struct src. */
static struct src_rsc_ops src_rsc_ops = {
	.set_state = src_set_state,
	.set_bm = src_set_bm,
	.set_sf = src_set_sf,
	.set_pm = src_set_pm,
	.set_rom = src_set_rom,
	.set_vo = src_set_vo,
	.set_st = src_set_st,
	.set_bp = src_set_bp,
	.set_cisz = src_set_cisz,
	.set_ca = src_set_ca,
	.set_sa = src_set_sa,
	.set_la = src_set_la,
	.set_pitch = src_set_pitch,
	.set_clr_zbufs = src_set_clear_zbufs,
	.commit_write = src_commit_write,
	.get_ca = src_get_ca,
	.init = src_init,
	.next_interleave = src_next_interleave,
};
358
/*
 * Initialize a series of SRC resources starting at hardware index
 * @idx.  In MEMRD mode desc->multi contiguous SRCs are set up and
 * chained through @intlv (the first carries the full multi count);
 * other modes initialize a single SRC.  On error, already-initialized
 * members are disabled and uninitialized again.
 */
static int
src_rsc_init(struct src *src, u32 idx,
	     const struct src_desc *desc, struct src_mgr *mgr)
{
	int err;
	int i, n;
	struct src *p;

	n = (MEMRD == desc->mode) ? desc->multi : 1;
	for (i = 0, p = src; i < n; i++, p++) {
		err = rsc_init(&p->rsc, idx + i, SRC, desc->msr, mgr->mgr.hw);
		if (err)
			goto error1;

		/* Initialize src specific rsc operations */
		p->ops = &src_rsc_ops;
		p->multi = (0 == i) ? desc->multi : 1;
		p->mode = desc->mode;
		src_default_config[desc->mode](p);
		mgr->src_enable(mgr, p);
		p->intlv = p + 1;
	}
	(--p)->intlv = NULL;	/* Set @intlv of the last SRC to NULL */

	mgr->commit_write(mgr);

	return 0;

error1:
	/* Unwind members [0, i) in reverse order. */
	for (i--, p--; i >= 0; i--, p--) {
		mgr->src_disable(mgr, p);
		rsc_uninit(&p->rsc);
	}
	mgr->commit_write(mgr);
	return err;
}
395
396static int src_rsc_uninit(struct src *src, struct src_mgr *mgr)
397{
398 int i, n;
399 struct src *p;
400
401 n = (MEMRD == src->mode) ? src->multi : 1;
402 for (i = 0, p = src; i < n; i++, p++) {
403 mgr->src_disable(mgr, p);
404 rsc_uninit(&p->rsc);
405 p->multi = 0;
406 p->ops = NULL;
407 p->mode = NUM_SRCMODES;
408 p->intlv = NULL;
409 }
410 mgr->commit_write(mgr);
411
412 return 0;
413}
414
415static int
416get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
417{
418 unsigned int idx = SRC_RESOURCE_NUM;
419 int err;
420 struct src *src;
421 unsigned long flags;
422
423 *rsrc = NULL;
424
425 /* Check whether there are sufficient src resources to meet request. */
426 spin_lock_irqsave(&mgr->mgr_lock, flags);
427 if (MEMRD == desc->mode)
428 err = mgr_get_resource(&mgr->mgr, desc->multi, &idx);
429 else
430 err = mgr_get_resource(&mgr->mgr, 1, &idx);
431
432 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
433 if (err) {
434 printk(KERN_ERR "ctxfi: Can't meet SRC resource request!\n");
435 return err;
436 }
437
438 /* Allocate mem for master src resource */
439 if (MEMRD == desc->mode)
440 src = kzalloc(sizeof(*src)*desc->multi, GFP_KERNEL);
441 else
442 src = kzalloc(sizeof(*src), GFP_KERNEL);
443
444 if (NULL == src) {
445 err = -ENOMEM;
446 goto error1;
447 }
448
449 err = src_rsc_init(src, idx, desc, mgr);
450 if (err)
451 goto error2;
452
453 *rsrc = src;
454
455 return 0;
456
457error2:
458 kfree(src);
459error1:
460 spin_lock_irqsave(&mgr->mgr_lock, flags);
461 if (MEMRD == desc->mode)
462 mgr_put_resource(&mgr->mgr, desc->multi, idx);
463 else
464 mgr_put_resource(&mgr->mgr, 1, idx);
465
466 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
467 return err;
468}
469
/*
 * Release SRC resource(s) obtained from get_src_rsc(): return the
 * hardware indices to the pool, uninitialize the series and free the
 * memory.
 */
static int put_src_rsc(struct src_mgr *mgr, struct src *src)
{
	unsigned long flags;

	spin_lock_irqsave(&mgr->mgr_lock, flags);
	src->rsc.ops->master(&src->rsc);	/* release from the master index */
	if (MEMRD == src->mode)
		mgr_put_resource(&mgr->mgr, src->multi,
				 src->rsc.ops->index(&src->rsc));
	else
		mgr_put_resource(&mgr->mgr, 1, src->rsc.ops->index(&src->rsc));

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	src_rsc_uninit(src, mgr);
	kfree(src);

	return 0;
}
488
/*
 * Stage an enable/disable of a SRC (master plus all conjugates) in
 * the manager control block; src_mgr_commit_write() flushes it.
 * src_enable_s uses the hw "enbs" op while src_enable uses "enb" —
 * presumably synchronous vs. normal enable; confirm against the hw
 * layer.
 */
static int src_enable_s(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_enbs_src(mgr->mgr.ctrl_blk,
				     src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_enable(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_enb_src(mgr->mgr.ctrl_blk,
				    src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_disable(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_dsb_src(mgr->mgr.ctrl_blk,
				    src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}
536
/* Flush staged manager-level SRC enable/disable state to hardware. */
static int src_mgr_commit_write(struct src_mgr *mgr)
{
	struct hw *hw = mgr->mgr.hw;

	hw->src_mgr_commit_write(hw, mgr->mgr.ctrl_blk);

	return 0;
}
545
/* Create and initialize the SRC resource manager. */
int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
{
	int err, i;
	struct src_mgr *src_mgr;

	*rsrc_mgr = NULL;
	src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL);
	if (NULL == src_mgr)
		return -ENOMEM;

	err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw);
	if (err)
		goto error1;

	spin_lock_init(&src_mgr->mgr_lock);
	/* NOTE(review): conj_mask is file-scope state written here —
	 * assumes a single SRC manager / hw instance; confirm. */
	conj_mask = ((struct hw *)hw)->src_dirty_conj_mask();

	src_mgr->get_src = get_src_rsc;
	src_mgr->put_src = put_src_rsc;
	src_mgr->src_enable_s = src_enable_s;
	src_mgr->src_enable = src_enable;
	src_mgr->src_disable = src_disable;
	src_mgr->commit_write = src_mgr_commit_write;

	/* Disable all SRC resources.  NOTE(review): 256 slots are
	 * disabled though only SRC_RESOURCE_NUM (64) are managed —
	 * presumably the hardware slot count; confirm. */
	for (i = 0; i < 256; i++)
		((struct hw *)hw)->src_mgr_dsb_src(src_mgr->mgr.ctrl_blk, i);

	((struct hw *)hw)->src_mgr_commit_write(hw, src_mgr->mgr.ctrl_blk);

	*rsrc_mgr = src_mgr;

	return 0;

error1:
	kfree(src_mgr);
	return err;
}
584
/* Tear down the SRC resource manager created by src_mgr_create(). */
int src_mgr_destroy(struct src_mgr *src_mgr)
{
	rsc_mgr_uninit(&src_mgr->mgr);
	kfree(src_mgr);

	return 0;
}
592
/* SRCIMP resource manager operations */

/* Rewind to the first (master) physical index of a SRCIMP.  Note the
 * deliberate assignment-in-return: rsc->idx tracks the master index. */
static int srcimp_master(struct rsc *rsc)
{
	rsc->conj = 0;
	return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}

/* Step to the next conjugate physical index of a SRCIMP. */
static int srcimp_next_conj(struct rsc *rsc)
{
	rsc->conj++;
	return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}

/* Physical index at the current conjugate position. */
static int srcimp_index(const struct rsc *rsc)
{
	return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}

/* SRCIMPs use a per-instance index table instead of the generic
 * stride scheme, and have no Audio Ring output slot of their own. */
static struct rsc_ops srcimp_basic_rsc_ops = {
	.master = srcimp_master,
	.next_conj = srcimp_next_conj,
	.index = srcimp_index,
	.output_slot = NULL,
};
618
/*
 * Build one input-mapper entry per master sample rate, tying each of
 * @input's output slots to @src through @srcimp, and register the
 * entries with the manager.  Bits in srcimp->mapped record which
 * entries are live (used by srcimp_unmap()).
 */
static int srcimp_map(struct srcimp *srcimp, struct src *src, struct rsc *input)
{
	struct imapper *entry;
	int i;

	srcimp->rsc.ops->master(&srcimp->rsc);
	src->rsc.ops->master(&src->rsc);
	input->ops->master(input);

	/* Program master and conjugate resources */
	for (i = 0; i < srcimp->rsc.msr; i++) {
		entry = &srcimp->imappers[i];
		entry->slot = input->ops->output_slot(input);
		entry->user = src->rsc.ops->index(&src->rsc);
		entry->addr = srcimp->rsc.ops->index(&srcimp->rsc);
		srcimp->mgr->imap_add(srcimp->mgr, entry);
		srcimp->mapped |= (0x1 << i);

		srcimp->rsc.ops->next_conj(&srcimp->rsc);
		input->ops->next_conj(input);
	}

	srcimp->rsc.ops->master(&srcimp->rsc);
	input->ops->master(input);

	return 0;
}
646
/* Delete every live input-mapper entry recorded in srcimp->mapped. */
static int srcimp_unmap(struct srcimp *srcimp)
{
	int i;

	/* Program master and conjugate resources */
	for (i = 0; i < srcimp->rsc.msr; i++) {
		if (srcimp->mapped & (0x1 << i)) {
			srcimp->mgr->imap_delete(srcimp->mgr,
						 &srcimp->imappers[i]);
			srcimp->mapped &= ~(0x1 << i);
		}
	}

	return 0;
}

/* SRCIMP-specific operation table exposed through struct srcimp. */
static struct srcimp_rsc_ops srcimp_ops = {
	.map = srcimp_map,
	.unmap = srcimp_unmap
};
667
668static int srcimp_rsc_init(struct srcimp *srcimp,
669 const struct srcimp_desc *desc,
670 struct srcimp_mgr *mgr)
671{
672 int err;
673
674 err = rsc_init(&srcimp->rsc, srcimp->idx[0],
675 SRCIMP, desc->msr, mgr->mgr.hw);
676 if (err)
677 return err;
678
679 /* Reserve memory for imapper nodes */
680 srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr,
681 GFP_KERNEL);
682 if (NULL == srcimp->imappers) {
683 err = -ENOMEM;
684 goto error1;
685 }
686
687 /* Set srcimp specific operations */
688 srcimp->rsc.ops = &srcimp_basic_rsc_ops;
689 srcimp->ops = &srcimp_ops;
690 srcimp->mgr = mgr;
691
692 srcimp->rsc.ops->master(&srcimp->rsc);
693
694 return 0;
695
696error1:
697 rsc_uninit(&srcimp->rsc);
698 return err;
699}
700
701static int srcimp_rsc_uninit(struct srcimp *srcimp)
702{
703 if (NULL != srcimp->imappers) {
704 kfree(srcimp->imappers);
705 srcimp->imappers = NULL;
706 }
707 srcimp->ops = NULL;
708 srcimp->mgr = NULL;
709 rsc_uninit(&srcimp->rsc);
710
711 return 0;
712}
713
714static int get_srcimp_rsc(struct srcimp_mgr *mgr,
715 const struct srcimp_desc *desc,
716 struct srcimp **rsrcimp)
717{
718 int err, i;
719 unsigned int idx;
720 struct srcimp *srcimp;
721 unsigned long flags;
722
723 *rsrcimp = NULL;
724
725 /* Allocate mem for SRCIMP resource */
726 srcimp = kzalloc(sizeof(*srcimp), GFP_KERNEL);
727 if (NULL == srcimp) {
728 err = -ENOMEM;
729 return err;
730 }
731
732 /* Check whether there are sufficient SRCIMP resources. */
733 spin_lock_irqsave(&mgr->mgr_lock, flags);
734 for (i = 0; i < desc->msr; i++) {
735 err = mgr_get_resource(&mgr->mgr, 1, &idx);
736 if (err)
737 break;
738
739 srcimp->idx[i] = idx;
740 }
741 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
742 if (err) {
743 printk(KERN_ERR "ctxfi: Can't meet SRCIMP resource request!\n");
744 goto error1;
745 }
746
747 err = srcimp_rsc_init(srcimp, desc, mgr);
748 if (err)
749 goto error1;
750
751 *rsrcimp = srcimp;
752
753 return 0;
754
755error1:
756 spin_lock_irqsave(&mgr->mgr_lock, flags);
757 for (i--; i >= 0; i--)
758 mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);
759
760 spin_unlock_irqrestore(&mgr->mgr_lock, flags);
761 kfree(srcimp);
762 return err;
763}
764
/* Release a SRCIMP obtained from get_srcimp_rsc(): return all of its
 * hardware indices, uninitialize and free it. */
static int put_srcimp_rsc(struct srcimp_mgr *mgr, struct srcimp *srcimp)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mgr->mgr_lock, flags);
	for (i = 0; i < srcimp->rsc.msr; i++)
		mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	srcimp_rsc_uninit(srcimp);
	kfree(srcimp);

	return 0;
}
780
/* Write one input-mapper @entry to hardware.  @data is the
 * srcimp_mgr; this is the callback used by the ctimap list helpers. */
static int srcimp_map_op(void *data, struct imapper *entry)
{
	struct rsc_mgr *mgr = &((struct srcimp_mgr *)data)->mgr;
	struct hw *hw = mgr->hw;

	hw->srcimp_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
	hw->srcimp_mgr_set_imapuser(mgr->ctrl_blk, entry->user);
	hw->srcimp_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
	hw->srcimp_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
	hw->srcimp_mgr_commit_write(mgr->hw, mgr->ctrl_blk);

	return 0;
}
794
/*
 * Add an input-mapper entry under the imap lock.  The placeholder
 * entry (init_imap, addr 0) installed at create time is removed when
 * the first real entry for address 0 arrives — apparently to keep the
 * hardware mapper list non-empty; confirm against ctimap.
 */
static int srcimp_imap_add(struct srcimp_mgr *mgr, struct imapper *entry)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&mgr->imap_lock, flags);
	if ((0 == entry->addr) && (mgr->init_imap_added)) {
		input_mapper_delete(&mgr->imappers,
				    mgr->init_imap, srcimp_map_op, mgr);
		mgr->init_imap_added = 0;
	}
	err = input_mapper_add(&mgr->imappers, entry, srcimp_map_op, mgr);
	spin_unlock_irqrestore(&mgr->imap_lock, flags);

	return err;
}
811
/*
 * Delete an input-mapper entry under the imap lock.  When the list
 * becomes empty the placeholder entry is re-installed (mirror of
 * srcimp_imap_add()).
 */
static int srcimp_imap_delete(struct srcimp_mgr *mgr, struct imapper *entry)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&mgr->imap_lock, flags);
	err = input_mapper_delete(&mgr->imappers, entry, srcimp_map_op, mgr);
	if (list_empty(&mgr->imappers)) {
		input_mapper_add(&mgr->imappers, mgr->init_imap,
				 srcimp_map_op, mgr);
		mgr->init_imap_added = 1;
	}
	spin_unlock_irqrestore(&mgr->imap_lock, flags);

	return err;
}
828
/*
 * Create and initialize the SRCIMP resource manager, including the
 * all-zero placeholder imap entry that seeds the mapper list.
 */
int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
{
	int err;
	struct srcimp_mgr *srcimp_mgr;
	struct imapper *entry;

	*rsrcimp_mgr = NULL;
	srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL);
	if (NULL == srcimp_mgr)
		return -ENOMEM;

	err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw);
	if (err)
		goto error1;

	spin_lock_init(&srcimp_mgr->mgr_lock);
	spin_lock_init(&srcimp_mgr->imap_lock);
	INIT_LIST_HEAD(&srcimp_mgr->imappers);
	/* Seed the mapper list with an all-zero placeholder entry. */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (NULL == entry) {
		err = -ENOMEM;
		goto error2;
	}
	entry->slot = entry->addr = entry->next = entry->user = 0;
	list_add(&entry->list, &srcimp_mgr->imappers);
	srcimp_mgr->init_imap = entry;
	srcimp_mgr->init_imap_added = 1;

	srcimp_mgr->get_srcimp = get_srcimp_rsc;
	srcimp_mgr->put_srcimp = put_srcimp_rsc;
	srcimp_mgr->imap_add = srcimp_imap_add;
	srcimp_mgr->imap_delete = srcimp_imap_delete;

	*rsrcimp_mgr = srcimp_mgr;

	return 0;

error2:
	rsc_mgr_uninit(&srcimp_mgr->mgr);
error1:
	kfree(srcimp_mgr);
	return err;
}
872
/* Tear down the SRCIMP manager: free the whole imap list (including
 * the placeholder entry) and the generic manager state. */
int srcimp_mgr_destroy(struct srcimp_mgr *srcimp_mgr)
{
	unsigned long flags;

	/* free src input mapper list */
	spin_lock_irqsave(&srcimp_mgr->imap_lock, flags);
	free_input_mapper_list(&srcimp_mgr->imappers);
	spin_unlock_irqrestore(&srcimp_mgr->imap_lock, flags);

	rsc_mgr_uninit(&srcimp_mgr->mgr);
	kfree(srcimp_mgr);

	return 0;
}
diff --git a/sound/pci/ctxfi/ctsrc.h b/sound/pci/ctxfi/ctsrc.h
new file mode 100644
index 000000000000..259366aabcac
--- /dev/null
+++ b/sound/pci/ctxfi/ctsrc.h
@@ -0,0 +1,149 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctsrc.h
9 *
10 * @Brief
11 * This file contains the definition of the Sample Rate Convertor
12 * resource management object.
13 *
14 * @Author Liu Chun
15 * @Date May 13 2008
16 *
17 */
18
19#ifndef CTSRC_H
20#define CTSRC_H
21
22#include "ctresource.h"
23#include "ctimap.h"
24#include <linux/spinlock.h>
25#include <linux/list.h>
26
27#define SRC_STATE_OFF 0x0
28#define SRC_STATE_INIT 0x4
29#define SRC_STATE_RUN 0x5
30
31#define SRC_SF_U8 0x0
32#define SRC_SF_S16 0x1
33#define SRC_SF_S24 0x2
34#define SRC_SF_S32 0x3
35#define SRC_SF_F32 0x4
36
/* Working modes of a SRC resource */
enum SRCMODE {
	MEMRD,		/* Read data from host memory */
	MEMWR,		/* Write data to host memory */
	ARCRW,		/* Read from and write to audio ring channel */
	NUM_SRCMODES
};
44
45struct src_rsc_ops;
46
/* Runtime state of one Sample Rate Converter resource */
struct src {
	struct rsc rsc;		/* Basic resource info */
	struct src *intlv;	/* Pointer to next interleaved SRC in a series */
	struct src_rsc_ops *ops;	/* SRC specific operations */
	/* Number of contiguous srcs for interleaved usage */
	unsigned char multi;
	unsigned char mode;	/* Working mode of this SRC resource (enum SRCMODE) */
};
55
/* Operation table of a SRC resource; setters program the per-SRC
 * hardware registers, commit_write() flushes the pending values. */
struct src_rsc_ops {
	int (*set_state)(struct src *src, unsigned int state);
	int (*set_bm)(struct src *src, unsigned int bm);
	int (*set_sf)(struct src *src, unsigned int sf);
	int (*set_pm)(struct src *src, unsigned int pm);
	int (*set_rom)(struct src *src, unsigned int rom);
	int (*set_vo)(struct src *src, unsigned int vo);
	int (*set_st)(struct src *src, unsigned int st);
	int (*set_bp)(struct src *src, unsigned int bp);
	int (*set_cisz)(struct src *src, unsigned int cisz);
	int (*set_ca)(struct src *src, unsigned int ca);
	int (*set_sa)(struct src *src, unsigned int sa);
	int (*set_la)(struct src *src, unsigned int la);
	int (*set_pitch)(struct src *src, unsigned int pitch);
	int (*set_clr_zbufs)(struct src *src);
	int (*commit_write)(struct src *src);
	int (*get_ca)(struct src *src);
	int (*init)(struct src *src);
	/* kernel style: the pointer star binds to the declarator */
	struct src *(*next_interleave)(struct src *src);
};
76
/* Define src resource request description info */
struct src_desc {
	/* Number of contiguous master srcs for interleaved usage */
	unsigned char multi;
	unsigned char msr;	/* NOTE(review): presumably the master sample-rate multiple — confirm */
	unsigned char mode;	/* Working mode of the requested srcs */
};
84
/* Define src manager object; created by src_mgr_create() */
struct src_mgr {
	struct rsc_mgr mgr;	/* Basic resource manager info */
	spinlock_t mgr_lock;

	/* request src resource */
	int (*get_src)(struct src_mgr *mgr,
		       const struct src_desc *desc, struct src **rsrc);
	/* return src resource */
	int (*put_src)(struct src_mgr *mgr, struct src *src);
	int (*src_enable_s)(struct src_mgr *mgr, struct src *src);
	int (*src_enable)(struct src_mgr *mgr, struct src *src);
	int (*src_disable)(struct src_mgr *mgr, struct src *src);
	int (*commit_write)(struct src_mgr *mgr);
};
100
101/* Define the descriptor of a SRC Input Mapper resource */
102struct srcimp_mgr;
103struct srcimp_rsc_ops;
104
/* One SRC Input Mapper resource instance */
struct srcimp {
	struct rsc rsc;
	unsigned char idx[8];
	struct imapper *imappers;
	unsigned int mapped;	/* A bit-map indicating which conj rsc is mapped */
	struct srcimp_mgr *mgr;	/* owning manager */
	struct srcimp_rsc_ops *ops;
};
113
/* Mapping operations of a SRCIMP resource */
struct srcimp_rsc_ops {
	int (*map)(struct srcimp *srcimp, struct src *user, struct rsc *input);
	int (*unmap)(struct srcimp *srcimp);
};
118
/* Define SRCIMP resource request description info */
struct srcimp_desc {
	unsigned int msr;	/* NOTE(review): presumably the sample-rate multiple — confirm */
};
123
/* SRCIMP resource manager; created by srcimp_mgr_create() */
struct srcimp_mgr {
	struct rsc_mgr mgr;	/* Basic resource manager info */
	spinlock_t mgr_lock;
	spinlock_t imap_lock;	/* protects the imappers list */
	struct list_head imappers;
	struct imapper *init_imap;	/* placeholder kept while list would be empty */
	unsigned int init_imap_added;	/* non-zero while init_imap is on the list */

	/* request srcimp resource */
	int (*get_srcimp)(struct srcimp_mgr *mgr,
			  const struct srcimp_desc *desc,
			  struct srcimp **rsrcimp);
	/* return srcimp resource */
	int (*put_srcimp)(struct srcimp_mgr *mgr, struct srcimp *srcimp);
	int (*imap_add)(struct srcimp_mgr *mgr, struct imapper *entry);
	int (*imap_delete)(struct srcimp_mgr *mgr, struct imapper *entry);
};
141
142/* Constructor and destructor of SRC resource manager */
143int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr);
144int src_mgr_destroy(struct src_mgr *src_mgr);
145/* Constructor and destructor of SRCIMP resource manager */
146int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrc_mgr);
147int srcimp_mgr_destroy(struct srcimp_mgr *srcimp_mgr);
148
149#endif /* CTSRC_H */
diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c
new file mode 100644
index 000000000000..779c6c3591a5
--- /dev/null
+++ b/sound/pci/ctxfi/cttimer.c
@@ -0,0 +1,441 @@
1/*
2 * PCM timer handling on ctxfi
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 */
8
9#include <linux/slab.h>
10#include <linux/math64.h>
11#include <linux/moduleparam.h>
12#include <sound/core.h>
13#include <sound/pcm.h>
14#include "ctatc.h"
15#include "cthardware.h"
16#include "cttimer.h"
17
18static int use_system_timer;
19MODULE_PARM_DESC(use_system_timer, "Foce to use system-timer");
20module_param(use_system_timer, bool, S_IRUGO);
21
/* Backend operations; filled by either ct_systimer_ops or
 * ct_xfitimer_ops.  Optional hooks may be NULL. */
struct ct_timer_ops {
	void (*init)(struct ct_timer_instance *);
	void (*prepare)(struct ct_timer_instance *);
	void (*start)(struct ct_timer_instance *);
	void (*stop)(struct ct_timer_instance *);
	void (*free_instance)(struct ct_timer_instance *);
	void (*interrupt)(struct ct_timer *);
	void (*free_global)(struct ct_timer *);
};
31
/* timer instance -- assigned to each PCM stream */
struct ct_timer_instance {
	spinlock_t lock;		/* protects running/timer below */
	struct ct_timer *timer_base;	/* owning timer manager */
	struct ct_atc_pcm *apcm;
	struct snd_pcm_substream *substream;
	struct timer_list timer;	/* used by the system-timer backend */
	struct list_head instance_list;	/* link in ct_timer.instance_head */
	struct list_head running_list;	/* link in ct_timer.running_head */
	unsigned int position;		/* last observed PCM pointer position */
	unsigned int frag_count;	/* ticks remaining to next period (xfitimer) */
	unsigned int running:1;
	unsigned int need_update:1;	/* period elapsed, notification pending */
};
46
/* timer instance manager */
struct ct_timer {
	spinlock_t lock;		/* global timer lock (for xfitimer) */
	spinlock_t list_lock;		/* lock for instance list */
	struct ct_atc *atc;
	struct ct_timer_ops *ops;	/* selected backend (system or xfi-native) */
	struct list_head instance_head;	/* all created instances */
	struct list_head running_head;	/* currently running instances */
	unsigned int wc;		/* current wallclock */
	unsigned int irq_handling:1;	/* in IRQ handling */
	unsigned int reprogram:1;	/* need to reprogram the interval */
	unsigned int running:1;		/* global timer running */
};
60
61
62/*
63 * system-timer-based updates
64 */
65
/*
 * Periodic system-timer callback for one stream: notify the PCM core
 * when the hardware pointer crossed a period boundary, then re-arm
 * the timer to fire near the next expected boundary.
 */
static void ct_systimer_callback(unsigned long data)
{
	struct ct_timer_instance *ti = (struct ct_timer_instance *)data;
	struct snd_pcm_substream *substream = ti->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = ti->apcm;
	unsigned int period_size = runtime->period_size;
	unsigned int buffer_size = runtime->buffer_size;
	unsigned long flags;
	unsigned int position, dist, interval;

	position = substream->ops->pointer(substream);
	/* frames advanced since the last callback, modulo buffer size */
	dist = (position + buffer_size - ti->position) % buffer_size;
	if (dist >= period_size ||
	    position / period_size != ti->position / period_size) {
		apcm->interrupt(apcm);
		ti->position = position;
	}
	/* Add extra HZ*5/1000 to avoid overrun issue when recording
	 * at 8kHz in 8-bit format or at 88kHz in 24-bit format. */
	interval = ((period_size - (position % period_size))
		   * HZ + (runtime->rate - 1)) / runtime->rate + HZ * 5 / 1000;
	spin_lock_irqsave(&ti->lock, flags);
	if (ti->running)
		mod_timer(&ti->timer, jiffies + interval);
	spin_unlock_irqrestore(&ti->lock, flags);
}
93
94static void ct_systimer_init(struct ct_timer_instance *ti)
95{
96 setup_timer(&ti->timer, ct_systimer_callback,
97 (unsigned long)ti);
98}
99
/* Mark the instance running and arm the timer one period ahead */
static void ct_systimer_start(struct ct_timer_instance *ti)
{
	struct snd_pcm_runtime *runtime = ti->substream->runtime;
	unsigned long flags;

	spin_lock_irqsave(&ti->lock, flags);
	ti->running = 1;
	/* period length in jiffies, rounded up */
	mod_timer(&ti->timer,
		  jiffies + (runtime->period_size * HZ +
			     (runtime->rate - 1)) / runtime->rate);
	spin_unlock_irqrestore(&ti->lock, flags);
}
112
/* Clear the running flag and cancel a pending timer (non-sync;
 * a concurrently running callback may still finish) */
static void ct_systimer_stop(struct ct_timer_instance *ti)
{
	unsigned long flags;

	spin_lock_irqsave(&ti->lock, flags);
	ti->running = 0;
	del_timer(&ti->timer);
	spin_unlock_irqrestore(&ti->lock, flags);
}
122
/* Stop the timer and try to reap a concurrently running callback;
 * doubles as the free_instance hook (see ct_systimer_free). */
static void ct_systimer_prepare(struct ct_timer_instance *ti)
{
	ct_systimer_stop(ti);
	try_to_del_timer_sync(&ti->timer);
}
128
129#define ct_systimer_free ct_systimer_prepare
130
/* backend vtable for the system-timer implementation */
static struct ct_timer_ops ct_systimer_ops = {
	.init = ct_systimer_init,
	.free_instance = ct_systimer_free,
	.prepare = ct_systimer_prepare,
	.start = ct_systimer_start,
	.stop = ct_systimer_stop,
};
138
139
140/*
141 * Handling multiple streams using a global emu20k1 timer irq
142 */
143
144#define CT_TIMER_FREQ 48000
145#define MIN_TICKS 1
146#define MAX_TICKS ((1 << 13) - 1)
147
/* Program the hardware tick counter (clamped to MAX_TICKS) and
 * enable the timer IRQ on the first rearm.  Called with atimer->lock
 * held (see ct_xfitimer_reprogram()). */
static void ct_xfitimer_irq_rearm(struct ct_timer *atimer, int ticks)
{
	struct hw *hw = atimer->atc->hw;
	if (ticks > MAX_TICKS)
		ticks = MAX_TICKS;
	hw->set_timer_tick(hw, ticks);
	if (!atimer->running)
		hw->set_timer_irq(hw, 1);
	atimer->running = 1;
}
158
/* Disable the hardware timer IRQ and tick counter if running */
static void ct_xfitimer_irq_stop(struct ct_timer *atimer)
{
	if (atimer->running) {
		struct hw *hw = atimer->atc->hw;
		hw->set_timer_irq(hw, 0);
		hw->set_timer_tick(hw, 0);
		atimer->running = 0;
	}
}
168
169static inline unsigned int ct_xfitimer_get_wc(struct ct_timer *atimer)
170{
171 struct hw *hw = atimer->atc->hw;
172 return hw->get_wc(hw);
173}
174
175/*
176 * reprogram the timer interval;
177 * checks the running instance list and determines the next timer interval.
 * also updates each stream's position and returns the number of streams
 * that need snd_pcm_period_elapsed() to be called
180 *
181 * call this inside the lock and irq disabled
182 */
static int ct_xfitimer_reprogram(struct ct_timer *atimer)
{
	struct ct_timer_instance *ti;
	unsigned int min_intr = (unsigned int)-1;
	int updates = 0;
	unsigned int wc, diff;

	/* nothing running: shut the hardware timer down */
	if (list_empty(&atimer->running_head)) {
		ct_xfitimer_irq_stop(atimer);
		atimer->reprogram = 0; /* clear flag */
		return 0;
	}

	/* wallclock ticks elapsed since the previous reprogram */
	wc = ct_xfitimer_get_wc(atimer);
	diff = wc - atimer->wc;
	atimer->wc = wc;
	list_for_each_entry(ti, &atimer->running_head, running_list) {
		if (ti->frag_count > diff)
			ti->frag_count -= diff;
		else {
			/* tick budget exhausted: check for a period
			 * crossing and recompute the ticks remaining to
			 * the next period boundary */
			unsigned int pos;
			unsigned int period_size, rate;

			period_size = ti->substream->runtime->period_size;
			rate = ti->substream->runtime->rate;
			pos = ti->substream->ops->pointer(ti->substream);
			if (pos / period_size != ti->position / period_size) {
				ti->need_update = 1;
				ti->position = pos;
				updates++;
			}
			pos %= period_size;
			pos = period_size - pos;
			/* frames -> wallclock ticks, rounded up */
			ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
						 rate - 1, rate);
		}
		if (ti->frag_count < min_intr)
			min_intr = ti->frag_count;
	}

	if (min_intr < MIN_TICKS)
		min_intr = MIN_TICKS;
	ct_xfitimer_irq_rearm(atimer, min_intr);
	atimer->reprogram = 0; /* clear flag */
	return updates;
}
229
/* look through the instance list and call period_elapsed if needed;
 * called outside atimer->lock, under list_lock only */
static void ct_xfitimer_check_period(struct ct_timer *atimer)
{
	struct ct_timer_instance *ti;
	unsigned long flags;

	spin_lock_irqsave(&atimer->list_lock, flags);
	list_for_each_entry(ti, &atimer->instance_head, instance_list) {
		if (ti->need_update) {
			ti->need_update = 0;
			/* notify the PCM core of the elapsed period */
			ti->apcm->interrupt(ti->apcm);
		}
	}
	spin_unlock_irqrestore(&atimer->list_lock, flags);
}
245
/* Handle timer-interrupt */
static void ct_xfitimer_callback(struct ct_timer *atimer)
{
	int update;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	atimer->irq_handling = 1;
	do {
		update = ct_xfitimer_reprogram(atimer);
		/* drop the lock while notifying the PCM core */
		spin_unlock(&atimer->lock);
		if (update)
			ct_xfitimer_check_period(atimer);
		spin_lock(&atimer->lock);
		/* re-run if a start/stop raced in meanwhile
		 * (ct_xfitimer_update() sets reprogram in that case) */
	} while (atimer->reprogram);
	atimer->irq_handling = 0;
	spin_unlock_irqrestore(&atimer->lock, flags);
}
264
265static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
266{
267 ti->frag_count = ti->substream->runtime->period_size;
268 ti->need_update = 0;
269}
270
271
/* start/stop the timer: reprogram the hardware from non-IRQ context;
 * if an IRQ is being handled, just flag it for the handler's loop */
static void ct_xfitimer_update(struct ct_timer *atimer)
{
	unsigned long flags;
	int update;

	spin_lock_irqsave(&atimer->lock, flags);
	if (atimer->irq_handling) {
		/* reached from IRQ handler; let it handle later */
		atimer->reprogram = 1;
		spin_unlock_irqrestore(&atimer->lock, flags);
		return;
	}

	ct_xfitimer_irq_stop(atimer);
	update = ct_xfitimer_reprogram(atimer);
	spin_unlock_irqrestore(&atimer->lock, flags);
	if (update)
		ct_xfitimer_check_period(atimer);
}
292
/* Put the instance on the running list and reprogram the hw timer */
static void ct_xfitimer_start(struct ct_timer_instance *ti)
{
	struct ct_timer *atimer = ti->timer_base;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	/* take a fresh wallclock base when this instance isn't queued yet */
	if (list_empty(&ti->running_list))
		atimer->wc = ct_xfitimer_get_wc(atimer);
	list_add(&ti->running_list, &atimer->running_head);
	spin_unlock_irqrestore(&atimer->lock, flags);
	ct_xfitimer_update(atimer);
}
305
/* Remove the instance from the running list and reprogram the timer */
static void ct_xfitimer_stop(struct ct_timer_instance *ti)
{
	struct ct_timer *atimer = ti->timer_base;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	list_del_init(&ti->running_list);
	ti->need_update = 0;
	spin_unlock_irqrestore(&atimer->lock, flags);
	ct_xfitimer_update(atimer);
}
317
/* Global teardown for the xfi-native backend: silence the hw timer */
static void ct_xfitimer_free_global(struct ct_timer *atimer)
{
	ct_xfitimer_irq_stop(atimer);
}
322
/* backend vtable for the xfi-native wallclock timer implementation */
static struct ct_timer_ops ct_xfitimer_ops = {
	.prepare = ct_xfitimer_prepare,
	.start = ct_xfitimer_start,
	.stop = ct_xfitimer_stop,
	.interrupt = ct_xfitimer_callback,
	.free_global = ct_xfitimer_free_global,
};
330
331/*
332 * timer instance
333 */
334
/*
 * Allocate a timer instance for PCM stream @apcm, run the backend's
 * optional init hook and register the instance on @atimer's list.
 * Returns NULL on allocation failure.
 */
struct ct_timer_instance *
ct_timer_instance_new(struct ct_timer *atimer, struct ct_atc_pcm *apcm)
{
	struct ct_timer_instance *ti;

	ti = kzalloc(sizeof(*ti), GFP_KERNEL);
	if (!ti)
		return NULL;
	spin_lock_init(&ti->lock);
	INIT_LIST_HEAD(&ti->instance_list);
	INIT_LIST_HEAD(&ti->running_list);
	ti->timer_base = atimer;
	ti->apcm = apcm;
	ti->substream = apcm->substream;
	if (atimer->ops->init)
		atimer->ops->init(ti);

	spin_lock_irq(&atimer->list_lock);
	list_add(&ti->instance_list, &atimer->instance_head);
	spin_unlock_irq(&atimer->list_lock);

	return ti;
}
358
/* Reset an instance before a stream (re)start via the backend hook */
void ct_timer_prepare(struct ct_timer_instance *ti)
{
	if (ti->timer_base->ops->prepare)
		ti->timer_base->ops->prepare(ti);
	ti->position = 0;
	ti->running = 0;
}
366
367void ct_timer_start(struct ct_timer_instance *ti)
368{
369 struct ct_timer *atimer = ti->timer_base;
370 atimer->ops->start(ti);
371}
372
373void ct_timer_stop(struct ct_timer_instance *ti)
374{
375 struct ct_timer *atimer = ti->timer_base;
376 atimer->ops->stop(ti);
377}
378
/*
 * Stop, unregister and free an instance created by
 * ct_timer_instance_new().
 */
void ct_timer_instance_free(struct ct_timer_instance *ti)
{
	struct ct_timer *atimer = ti->timer_base;

	atimer->ops->stop(ti); /* to be sure */
	if (atimer->ops->free_instance)
		atimer->ops->free_instance(ti);

	spin_lock_irq(&atimer->list_lock);
	list_del(&ti->instance_list);
	spin_unlock_irq(&atimer->list_lock);

	kfree(ti);
}
393
394/*
395 * timer manager
396 */
397
/* Hardware IRQ dispatch: forward interval-timer interrupts to the
 * backend's interrupt hook (registered in ct_timer_new()) */
static void ct_timer_interrupt(void *data, unsigned int status)
{
	struct ct_timer *timer = data;

	/* Interval timer interrupt */
	if ((status & IT_INT) && timer->ops->interrupt)
		timer->ops->interrupt(timer);
}
406
/*
 * Create the timer manager for @atc.  The X-Fi native wallclock
 * timer is used when the hardware provides set_timer_irq and the
 * use_system_timer module option is unset; otherwise the system
 * timer backend is selected.  Returns NULL on allocation failure.
 */
struct ct_timer *ct_timer_new(struct ct_atc *atc)
{
	struct ct_timer *atimer;
	struct hw *hw;

	atimer = kzalloc(sizeof(*atimer), GFP_KERNEL);
	if (!atimer)
		return NULL;
	spin_lock_init(&atimer->lock);
	spin_lock_init(&atimer->list_lock);
	INIT_LIST_HEAD(&atimer->instance_head);
	INIT_LIST_HEAD(&atimer->running_head);
	atimer->atc = atc;
	hw = atc->hw;
	if (!use_system_timer && hw->set_timer_irq) {
		snd_printd(KERN_INFO "ctxfi: Use xfi-native timer\n");
		atimer->ops = &ct_xfitimer_ops;
		hw->irq_callback_data = atimer;
		hw->irq_callback = ct_timer_interrupt;
	} else {
		snd_printd(KERN_INFO "ctxfi: Use system timer\n");
		atimer->ops = &ct_systimer_ops;
	}
	return atimer;
}
432
/* Destroy a timer manager: detach the IRQ callback, let the backend
 * clean up globally, then free */
void ct_timer_free(struct ct_timer *atimer)
{
	struct hw *hw = atimer->atc->hw;
	hw->irq_callback = NULL;
	if (atimer->ops->free_global)
		atimer->ops->free_global(atimer);
	kfree(atimer);
}
441
diff --git a/sound/pci/ctxfi/cttimer.h b/sound/pci/ctxfi/cttimer.h
new file mode 100644
index 000000000000..979348229291
--- /dev/null
+++ b/sound/pci/ctxfi/cttimer.h
@@ -0,0 +1,29 @@
1/*
2 * Timer handling
3 */
4
5#ifndef __CTTIMER_H
6#define __CTTIMER_H
7
8#include <linux/spinlock.h>
9#include <linux/timer.h>
10#include <linux/list.h>
11
12struct snd_pcm_substream;
13struct ct_atc;
14struct ct_atc_pcm;
15
16struct ct_timer;
17struct ct_timer_instance;
18
19struct ct_timer *ct_timer_new(struct ct_atc *atc);
20void ct_timer_free(struct ct_timer *atimer);
21
22struct ct_timer_instance *
23ct_timer_instance_new(struct ct_timer *atimer, struct ct_atc_pcm *apcm);
24void ct_timer_instance_free(struct ct_timer_instance *ti);
25void ct_timer_start(struct ct_timer_instance *ti);
26void ct_timer_stop(struct ct_timer_instance *ti);
27void ct_timer_prepare(struct ct_timer_instance *ti);
28
29#endif /* __CTTIMER_H */
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
new file mode 100644
index 000000000000..67665a7e43c6
--- /dev/null
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -0,0 +1,250 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctvmem.c
9 *
10 * @Brief
11 * This file contains the implementation of virtual memory management object
12 * for card device.
13 *
14 * @Author Liu Chun
15 * @Date Apr 1 2008
16 */
17
18#include "ctvmem.h"
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/io.h>
22#include <sound/pcm.h>
23
24#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
25#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
26
27/* *
28 * Find or create vm block based on requested @size.
29 * @size must be page aligned.
30 * */
31static struct ct_vm_block *
32get_vm_block(struct ct_vm *vm, unsigned int size)
33{
34 struct ct_vm_block *block = NULL, *entry;
35 struct list_head *pos;
36
37 size = CT_PAGE_ALIGN(size);
38 if (size > vm->size) {
39 printk(KERN_ERR "ctxfi: Fail! No sufficient device virtural "
40 "memory space available!\n");
41 return NULL;
42 }
43
44 mutex_lock(&vm->lock);
45 list_for_each(pos, &vm->unused) {
46 entry = list_entry(pos, struct ct_vm_block, list);
47 if (entry->size >= size)
48 break; /* found a block that is big enough */
49 }
50 if (pos == &vm->unused)
51 goto out;
52
53 if (entry->size == size) {
54 /* Move the vm node from unused list to used list directly */
55 list_del(&entry->list);
56 list_add(&entry->list, &vm->used);
57 vm->size -= size;
58 block = entry;
59 goto out;
60 }
61
62 block = kzalloc(sizeof(*block), GFP_KERNEL);
63 if (NULL == block)
64 goto out;
65
66 block->addr = entry->addr;
67 block->size = size;
68 list_add(&block->list, &vm->used);
69 entry->addr += size;
70 entry->size -= size;
71 vm->size -= size;
72
73 out:
74 mutex_unlock(&vm->lock);
75 return block;
76}
77
/* Return @block to the free list, merging with adjacent free areas */
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	/* find the first free entry located after the released block */
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* merge with the immediately following free block */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	/* walk backwards merging predecessors.
	 * NOTE(review): the loop merges whenever the predecessor ends at
	 * or BEFORE entry->addr; with a hole in between this would
	 * swallow the gap — confirm whether strict contiguity (==) was
	 * intended here. */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}
124
/* Map host addr (kmalloced/vmalloced) to device logical addr.
 * Returns the allocated vm block, or NULL when no block fits. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;

	block = get_vm_block(vm, size);
	if (block == NULL) {
		printk(KERN_ERR "ctxfi: No virtual memory block that is big "
		       "enough to allocate!\n");
		return NULL;
	}

	/* fill the page-table entries with the buffer's page addresses */
	ptp = vm->ptp[0];
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	/* record the caller's original (unaligned) size */
	block->size = size;
	return block;
}
153
/* Unmap a device logical address area: simply return the block to
 * the allocator (the page-table entries are left stale) */
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}
159
/* *
 * return the host (kmalloced) addr of the @index-th device
 * page table page on success, or NULL on failure.
 * The first returned NULL indicates the termination.
 * */
static void *
ct_get_ptp_virt(struct ct_vm *vm, int index)
{
	void *addr;

	addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];

	return addr;
}
174
/*
 * Allocate and initialise the device virtual-memory manager:
 * page-table pages, the free/used block lists and method pointers.
 * Stores the manager in *rvm and returns 0, or -ENOMEM.
 */
int ct_vm_create(struct ct_vm **rvm)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (NULL == vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages.
	 * NOTE(review): allocates PAGE_SIZE while addressing math uses
	 * CT_PAGE_SIZE — confirm intent (equivalent when both are 4k). */
	for (i = 0; i < CT_PTP_NUM; i++) {
		vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (NULL == vm->ptp[i])
			break;
	}
	if (!i) {
		/* no page table pages are allocated */
		kfree(vm);
		return -ENOMEM;
	}
	/* usable device address space scales with the ptps we got */
	vm->size = CT_ADDRS_PER_PAGE * i;
	/* Initialise remaining ptps */
	for (; i < CT_PTP_NUM; i++)
		vm->ptp[i] = NULL;

	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_virt = ct_get_ptp_virt;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	/* seed the free list with one block covering the whole space;
	 * failure here is tolerated (vm just starts with no free area) */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (NULL != block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
220
221/* The caller must ensure no mapping pages are being used
222 * by hardware before calling this function */
223void ct_vm_destroy(struct ct_vm *vm)
224{
225 int i;
226 struct list_head *pos;
227 struct ct_vm_block *entry;
228
229 /* free used and unused list nodes */
230 while (!list_empty(&vm->used)) {
231 pos = vm->used.next;
232 list_del(pos);
233 entry = list_entry(pos, struct ct_vm_block, list);
234 kfree(entry);
235 }
236 while (!list_empty(&vm->unused)) {
237 pos = vm->unused.next;
238 list_del(pos);
239 entry = list_entry(pos, struct ct_vm_block, list);
240 kfree(entry);
241 }
242
243 /* free allocated page table pages */
244 for (i = 0; i < CT_PTP_NUM; i++)
245 kfree(vm->ptp[i]);
246
247 vm->size = 0;
248
249 kfree(vm);
250}
diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h
new file mode 100644
index 000000000000..01e4fd0386a3
--- /dev/null
+++ b/sound/pci/ctxfi/ctvmem.h
@@ -0,0 +1,61 @@
1/**
2 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
3 *
4 * This source file is released under GPL v2 license (no other versions).
5 * See the COPYING file included in the main directory of this source
6 * distribution for the license terms and conditions.
7 *
8 * @File ctvmem.h
9 *
10 * @Brief
11 * This file contains the definition of virtual memory management object
12 * for card device.
13 *
14 * @Author Liu Chun
15 * @Date Mar 28 2008
16 */
17
18#ifndef CTVMEM_H
19#define CTVMEM_H
20
21#define CT_PTP_NUM 1 /* num of device page table pages */
22
23#include <linux/mutex.h>
24#include <linux/list.h>
25
/* The chip can handle the page table of 4k pages
 * (emu20k1 can handle even 8k pages, but we don't use it right now)
 */
#define CT_PAGE_SIZE	4096
#define CT_PAGE_SHIFT	12
/* NOTE(review): the mask is derived from the host PAGE_SIZE rather
 * than CT_PAGE_SIZE above — looks inconsistent; confirm intent
 * (harmless while both are 4k). */
#define CT_PAGE_MASK	(~(PAGE_SIZE - 1))
#define CT_PAGE_ALIGN(addr)	ALIGN(addr, CT_PAGE_SIZE)
33
/* A contiguous range of device logical address space */
struct ct_vm_block {
	unsigned int addr;	/* starting logical addr of this block */
	unsigned int size;	/* size of this device virtual mem block */
	struct list_head list;	/* link in ct_vm.used or ct_vm.unused */
};
39
40struct snd_pcm_substream;
41
/* Virtual memory management object for card device */
struct ct_vm {
	void *ptp[CT_PTP_NUM];		/* Device page table pages */
	unsigned int size;		/* Available addr space in bytes */
	struct list_head unused;	/* List of unused blocks */
	struct list_head used;		/* List of used blocks */
	struct mutex lock;		/* protects the two block lists */

	/* Map host addr (kmalloced/vmalloced) to device logical addr. */
	struct ct_vm_block *(*map)(struct ct_vm *, struct snd_pcm_substream *,
				   int size);
	/* Unmap device logical addr area. */
	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
	void *(*get_ptp_virt)(struct ct_vm *vm, int index);
};
57
58int ct_vm_create(struct ct_vm **rvm);
59void ct_vm_destroy(struct ct_vm *vm);
60
61#endif /* CTVMEM_H */
diff --git a/sound/pci/ctxfi/xfi.c b/sound/pci/ctxfi/xfi.c
new file mode 100644
index 000000000000..2d3dd89af151
--- /dev/null
+++ b/sound/pci/ctxfi/xfi.c
@@ -0,0 +1,142 @@
1/*
2 * xfi linux driver.
3 *
4 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
5 *
6 * This source file is released under GPL v2 license (no other versions).
7 * See the COPYING file included in the main directory of this source
8 * distribution for the license terms and conditions.
9 */
10
11#include <linux/init.h>
12#include <linux/pci.h>
13#include <linux/moduleparam.h>
14#include <linux/pci_ids.h>
15#include <sound/core.h>
16#include <sound/initval.h>
17#include "ctatc.h"
18#include "cthardware.h"
19
20MODULE_AUTHOR("Creative Technology Ltd");
21MODULE_DESCRIPTION("X-Fi driver version 1.03");
22MODULE_LICENSE("GPL v2");
23MODULE_SUPPORTED_DEVICE("{{Creative Labs, Sound Blaster X-Fi}");
24
/* clock options; validated and clamped in ct_card_probe() */
static unsigned int reference_rate = 48000;
static unsigned int multiple = 2;
MODULE_PARM_DESC(reference_rate, "Reference rate (default=48000)");
module_param(reference_rate, uint, S_IRUGO);
MODULE_PARM_DESC(multiple, "Rate multiplier (default=2)");
module_param(multiple, uint, S_IRUGO);
31
32static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
33static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
34static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
35
36module_param_array(index, int, NULL, 0444);
37MODULE_PARM_DESC(index, "Index value for Creative X-Fi driver");
38module_param_array(id, charp, NULL, 0444);
39MODULE_PARM_DESC(id, "ID string for Creative X-Fi driver");
40module_param_array(enable, bool, NULL, 0444);
41MODULE_PARM_DESC(enable, "Enable Creative X-Fi driver");
42
/* supported chips: emu20k1 and emu20k2; driver_data selects the model */
static struct pci_device_id ct_pci_dev_ids[] = {
	/* only X-Fi is supported, so... */
	{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K1),
	  .driver_data = ATC20K1,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2),
	  .driver_data = ATC20K2,
	},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ct_pci_dev_ids);
54
/*
 * Probe one X-Fi PCI device: create the card instance, validate the
 * module options (invalid values are reported and replaced by the
 * defaults), build the ATC object and its ALSA devices, and register
 * the card.  Returns 0 or a negative error code.
 */
static int __devinit
ct_card_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	static int dev;	/* index of the card being probed */
	struct snd_card *card;
	struct ct_atc *atc;
	int err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;

	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}
	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err)
		return err;
	if ((reference_rate != 48000) && (reference_rate != 44100)) {
		printk(KERN_ERR "ctxfi: Invalid reference_rate value %u!!!\n",
		       reference_rate);
		printk(KERN_ERR "ctxfi: The valid values for reference_rate "
		       "are 48000 and 44100, Value 48000 is assumed.\n");
		reference_rate = 48000;
	}
	if ((multiple != 1) && (multiple != 2)) {
		printk(KERN_ERR "ctxfi: Invalid multiple value %u!!!\n",
		       multiple);
		printk(KERN_ERR "ctxfi: The valid values for multiple are "
		       "1 and 2, Value 2 is assumed.\n");
		multiple = 2;
	}
	err = ct_atc_create(card, pci, reference_rate, multiple,
			    pci_id->driver_data, &atc);
	if (err < 0)
		goto error;

	card->private_data = atc;

	/* Create alsa devices supported by this card */
	err = ct_atc_create_alsa_devs(atc);
	if (err < 0)
		goto error;

	strcpy(card->driver, "SB-XFi");
	strcpy(card->shortname, "Creative X-Fi");
	snprintf(card->longname, sizeof(card->longname), "%s %s %s",
		 card->shortname, atc->chip_name, atc->model_name);

	err = snd_card_register(card);
	if (err < 0)
		goto error;

	pci_set_drvdata(pci, card);
	dev++;

	return 0;

error:
	snd_card_free(card);
	return err;
}
117
118static void __devexit ct_card_remove(struct pci_dev *pci)
119{
120 snd_card_free(pci_get_drvdata(pci));
121 pci_set_drvdata(pci, NULL);
122}
123
/* PCI driver glue */
static struct pci_driver ct_driver = {
	.name = "SB-XFi",
	.id_table = ct_pci_dev_ids,
	.probe = ct_card_probe,
	.remove = __devexit_p(ct_card_remove),
};
130
/* register the PCI driver on module load */
static int __init ct_card_init(void)
{
	return pci_register_driver(&ct_driver);
}
135
/* unregister the PCI driver on module unload */
static void __exit ct_card_exit(void)
{
	pci_unregister_driver(&ct_driver);
}
140
141module_init(ct_card_init)
142module_exit(ct_card_exit)
diff --git a/sound/pci/emu10k1/Makefile b/sound/pci/emu10k1/Makefile
index cf2d5636d8be..fc5591e7777e 100644
--- a/sound/pci/emu10k1/Makefile
+++ b/sound/pci/emu10k1/Makefile
@@ -9,15 +9,7 @@ snd-emu10k1-objs := emu10k1.o emu10k1_main.o \
9snd-emu10k1-synth-objs := emu10k1_synth.o emu10k1_callback.o emu10k1_patch.o 9snd-emu10k1-synth-objs := emu10k1_synth.o emu10k1_callback.o emu10k1_patch.o
10snd-emu10k1x-objs := emu10k1x.o 10snd-emu10k1x-objs := emu10k1x.o
11 11
12#
13# this function returns:
14# "m" - CONFIG_SND_SEQUENCER is m
15# <empty string> - CONFIG_SND_SEQUENCER is undefined
16# otherwise parameter #1 value
17#
18sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
19
20# Toplevel Module Dependency 12# Toplevel Module Dependency
21obj-$(CONFIG_SND_EMU10K1) += snd-emu10k1.o 13obj-$(CONFIG_SND_EMU10K1) += snd-emu10k1.o
22obj-$(call sequencer,$(CONFIG_SND_EMU10K1)) += snd-emu10k1-synth.o 14obj-$(CONFIG_SND_EMU10K1_SEQ) += snd-emu10k1-synth.o
23obj-$(CONFIG_SND_EMU10K1X) += snd-emu10k1x.o 15obj-$(CONFIG_SND_EMU10K1X) += snd-emu10k1x.o
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 1970f0e70f37..4d3ad793e98f 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -858,7 +858,6 @@ static int __devinit snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct s
858 } 858 }
859 859
860 pcm->info_flags = 0; 860 pcm->info_flags = 0;
861 pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX;
862 switch(device) { 861 switch(device) {
863 case 0: 862 case 0:
864 strcpy(pcm->name, "EMU10K1X Front"); 863 strcpy(pcm->name, "EMU10K1X Front");
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 78f62fd404c2..55b83ef73c63 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1736,7 +1736,7 @@ static struct snd_pcm_hardware snd_emu10k1_fx8010_playback =
1736 .buffer_bytes_max = (128*1024), 1736 .buffer_bytes_max = (128*1024),
1737 .period_bytes_min = 1024, 1737 .period_bytes_min = 1024,
1738 .period_bytes_max = (128*1024), 1738 .period_bytes_max = (128*1024),
1739 .periods_min = 1, 1739 .periods_min = 2,
1740 .periods_max = 1024, 1740 .periods_max = 1024,
1741 .fifo_size = 0, 1741 .fifo_size = 0,
1742}; 1742};
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index eb2a19b894a0..c710150d5065 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -139,6 +139,19 @@ config SND_HDA_CODEC_CONEXANT
139 snd-hda-codec-conexant. 139 snd-hda-codec-conexant.
140 This module is automatically loaded at probing. 140 This module is automatically loaded at probing.
141 141
142config SND_HDA_CODEC_CA0110
143 bool "Build Creative CA0110-IBG codec support"
144 depends on SND_HDA_INTEL
145 default y
146 help
147 Say Y here to include Creative CA0110-IBG codec support in
148 snd-hda-intel driver, found on some Creative X-Fi cards.
149
150 When the HD-audio driver is built as a module, the codec
151 support code is also built as another module,
152 snd-hda-codec-ca0110.
153 This module is automatically loaded at probing.
154
142config SND_HDA_CODEC_CMEDIA 155config SND_HDA_CODEC_CMEDIA
143 bool "Build C-Media HD-audio codec support" 156 bool "Build C-Media HD-audio codec support"
144 default y 157 default y
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index 50f9d0967251..e3081d4586cc 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -13,6 +13,7 @@ snd-hda-codec-analog-objs := patch_analog.o
13snd-hda-codec-idt-objs := patch_sigmatel.o 13snd-hda-codec-idt-objs := patch_sigmatel.o
14snd-hda-codec-si3054-objs := patch_si3054.o 14snd-hda-codec-si3054-objs := patch_si3054.o
15snd-hda-codec-atihdmi-objs := patch_atihdmi.o 15snd-hda-codec-atihdmi-objs := patch_atihdmi.o
16snd-hda-codec-ca0110-objs := patch_ca0110.o
16snd-hda-codec-conexant-objs := patch_conexant.o 17snd-hda-codec-conexant-objs := patch_conexant.o
17snd-hda-codec-via-objs := patch_via.o 18snd-hda-codec-via-objs := patch_via.o
18snd-hda-codec-nvhdmi-objs := patch_nvhdmi.o 19snd-hda-codec-nvhdmi-objs := patch_nvhdmi.o
@@ -40,6 +41,9 @@ endif
40ifdef CONFIG_SND_HDA_CODEC_ATIHDMI 41ifdef CONFIG_SND_HDA_CODEC_ATIHDMI
41obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-atihdmi.o 42obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-atihdmi.o
42endif 43endif
44ifdef CONFIG_SND_HDA_CODEC_CA0110
45obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-ca0110.o
46endif
43ifdef CONFIG_SND_HDA_CODEC_CONEXANT 47ifdef CONFIG_SND_HDA_CODEC_CONEXANT
44obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-conexant.o 48obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-conexant.o
45endif 49endif
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 4de5bacd3929..29272f2e95a0 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -45,6 +45,46 @@ static void snd_hda_generate_beep(struct work_struct *work)
45 AC_VERB_SET_BEEP_CONTROL, beep->tone); 45 AC_VERB_SET_BEEP_CONTROL, beep->tone);
46} 46}
47 47
48/* (non-standard) Linear beep tone calculation for IDT/STAC codecs
49 *
50 * The tone frequency of beep generator on IDT/STAC codecs is
51 * defined from the 8bit tone parameter, in Hz,
52 * freq = 48000 * (257 - tone) / 1024
53 * that is from 12kHz to 93.75kHz in step of 46.875 hz
54 */
55static int beep_linear_tone(struct hda_beep *beep, int hz)
56{
57 hz *= 1000; /* fixed point */
58 hz = hz - DIGBEEP_HZ_MIN;
59 if (hz < 0)
60 hz = 0; /* turn off PC beep*/
61 else if (hz >= (DIGBEEP_HZ_MAX - DIGBEEP_HZ_MIN))
62 hz = 0xff;
63 else {
64 hz /= DIGBEEP_HZ_STEP;
65 hz++;
66 }
67 return hz;
68}
69
70/* HD-audio standard beep tone parameter calculation
71 *
72 * The tone frequency in Hz is calculated as
73 * freq = 48000 / (tone * 4)
74 * from 47Hz to 12kHz
75 */
76static int beep_standard_tone(struct hda_beep *beep, int hz)
77{
78 if (hz <= 0)
79 return 0; /* disabled */
80 hz = 12000 / hz;
81 if (hz > 0xff)
82 return 0xff;
83 if (hz <= 0)
84 return 1;
85 return hz;
86}
87
48static int snd_hda_beep_event(struct input_dev *dev, unsigned int type, 88static int snd_hda_beep_event(struct input_dev *dev, unsigned int type,
49 unsigned int code, int hz) 89 unsigned int code, int hz)
50{ 90{
@@ -55,21 +95,14 @@ static int snd_hda_beep_event(struct input_dev *dev, unsigned int type,
55 if (hz) 95 if (hz)
56 hz = 1000; 96 hz = 1000;
57 case SND_TONE: 97 case SND_TONE:
58 hz *= 1000; /* fixed point */ 98 if (beep->linear_tone)
59 hz = hz - DIGBEEP_HZ_MIN; 99 beep->tone = beep_linear_tone(beep, hz);
60 if (hz < 0) 100 else
61 hz = 0; /* turn off PC beep*/ 101 beep->tone = beep_standard_tone(beep, hz);
62 else if (hz >= (DIGBEEP_HZ_MAX - DIGBEEP_HZ_MIN))
63 hz = 0xff;
64 else {
65 hz /= DIGBEEP_HZ_STEP;
66 hz++;
67 }
68 break; 102 break;
69 default: 103 default:
70 return -1; 104 return -1;
71 } 105 }
72 beep->tone = hz;
73 106
74 /* schedule beep event */ 107 /* schedule beep event */
75 schedule_work(&beep->beep_work); 108 schedule_work(&beep->beep_work);
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index 51bf6a5daf39..0c3de787c717 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -30,8 +30,9 @@ struct hda_beep {
30 struct hda_codec *codec; 30 struct hda_codec *codec;
31 char phys[32]; 31 char phys[32];
32 int tone; 32 int tone;
33 int nid; 33 hda_nid_t nid;
34 int enabled; 34 unsigned int enabled:1;
35 unsigned int linear_tone:1; /* linear tone for IDT/STAC codec */
35 struct work_struct beep_work; /* scheduled task for beep event */ 36 struct work_struct beep_work; /* scheduled task for beep event */
36}; 37};
37 38
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 8820faf6c9d8..562403a23488 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -48,6 +48,7 @@ static struct hda_vendor_id hda_vendor_ids[] = {
48 { 0x1095, "Silicon Image" }, 48 { 0x1095, "Silicon Image" },
49 { 0x10de, "Nvidia" }, 49 { 0x10de, "Nvidia" },
50 { 0x10ec, "Realtek" }, 50 { 0x10ec, "Realtek" },
51 { 0x1102, "Creative" },
51 { 0x1106, "VIA" }, 52 { 0x1106, "VIA" },
52 { 0x111d, "IDT" }, 53 { 0x111d, "IDT" },
53 { 0x11c1, "LSI" }, 54 { 0x11c1, "LSI" },
@@ -157,6 +158,39 @@ make_codec_cmd(struct hda_codec *codec, hda_nid_t nid, int direct,
157 return val; 158 return val;
158} 159}
159 160
161/*
162 * Send and receive a verb
163 */
164static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
165 unsigned int *res)
166{
167 struct hda_bus *bus = codec->bus;
168 int err;
169
170 if (res)
171 *res = -1;
172 again:
173 snd_hda_power_up(codec);
174 mutex_lock(&bus->cmd_mutex);
175 err = bus->ops.command(bus, cmd);
176 if (!err && res)
177 *res = bus->ops.get_response(bus);
178 mutex_unlock(&bus->cmd_mutex);
179 snd_hda_power_down(codec);
180 if (res && *res == -1 && bus->rirb_error) {
181 if (bus->response_reset) {
182 snd_printd("hda_codec: resetting BUS due to "
183 "fatal communication error\n");
184 bus->ops.bus_reset(bus);
185 }
186 goto again;
187 }
188 /* clear reset-flag when the communication gets recovered */
189 if (!err)
190 bus->response_reset = 0;
191 return err;
192}
193
160/** 194/**
161 * snd_hda_codec_read - send a command and get the response 195 * snd_hda_codec_read - send a command and get the response
162 * @codec: the HDA codec 196 * @codec: the HDA codec
@@ -173,18 +207,9 @@ unsigned int snd_hda_codec_read(struct hda_codec *codec, hda_nid_t nid,
173 int direct, 207 int direct,
174 unsigned int verb, unsigned int parm) 208 unsigned int verb, unsigned int parm)
175{ 209{
176 struct hda_bus *bus = codec->bus; 210 unsigned cmd = make_codec_cmd(codec, nid, direct, verb, parm);
177 unsigned int res; 211 unsigned int res;
178 212 codec_exec_verb(codec, cmd, &res);
179 res = make_codec_cmd(codec, nid, direct, verb, parm);
180 snd_hda_power_up(codec);
181 mutex_lock(&bus->cmd_mutex);
182 if (!bus->ops.command(bus, res))
183 res = bus->ops.get_response(bus);
184 else
185 res = (unsigned int)-1;
186 mutex_unlock(&bus->cmd_mutex);
187 snd_hda_power_down(codec);
188 return res; 213 return res;
189} 214}
190EXPORT_SYMBOL_HDA(snd_hda_codec_read); 215EXPORT_SYMBOL_HDA(snd_hda_codec_read);
@@ -204,17 +229,10 @@ EXPORT_SYMBOL_HDA(snd_hda_codec_read);
204int snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int direct, 229int snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int direct,
205 unsigned int verb, unsigned int parm) 230 unsigned int verb, unsigned int parm)
206{ 231{
207 struct hda_bus *bus = codec->bus; 232 unsigned int cmd = make_codec_cmd(codec, nid, direct, verb, parm);
208 unsigned int res; 233 unsigned int res;
209 int err; 234 return codec_exec_verb(codec, cmd,
210 235 codec->bus->sync_write ? &res : NULL);
211 res = make_codec_cmd(codec, nid, direct, verb, parm);
212 snd_hda_power_up(codec);
213 mutex_lock(&bus->cmd_mutex);
214 err = bus->ops.command(bus, res);
215 mutex_unlock(&bus->cmd_mutex);
216 snd_hda_power_down(codec);
217 return err;
218} 236}
219EXPORT_SYMBOL_HDA(snd_hda_codec_write); 237EXPORT_SYMBOL_HDA(snd_hda_codec_write);
220 238
@@ -613,7 +631,10 @@ static int get_codec_name(struct hda_codec *codec)
613 const struct hda_vendor_id *c; 631 const struct hda_vendor_id *c;
614 const char *vendor = NULL; 632 const char *vendor = NULL;
615 u16 vendor_id = codec->vendor_id >> 16; 633 u16 vendor_id = codec->vendor_id >> 16;
616 char tmp[16], name[32]; 634 char tmp[16];
635
636 if (codec->vendor_name)
637 goto get_chip_name;
617 638
618 for (c = hda_vendor_ids; c->id; c++) { 639 for (c = hda_vendor_ids; c->id; c++) {
619 if (c->id == vendor_id) { 640 if (c->id == vendor_id) {
@@ -625,14 +646,21 @@ static int get_codec_name(struct hda_codec *codec)
625 sprintf(tmp, "Generic %04x", vendor_id); 646 sprintf(tmp, "Generic %04x", vendor_id);
626 vendor = tmp; 647 vendor = tmp;
627 } 648 }
649 codec->vendor_name = kstrdup(vendor, GFP_KERNEL);
650 if (!codec->vendor_name)
651 return -ENOMEM;
652
653 get_chip_name:
654 if (codec->chip_name)
655 return 0;
656
628 if (codec->preset && codec->preset->name) 657 if (codec->preset && codec->preset->name)
629 snprintf(name, sizeof(name), "%s %s", vendor, 658 codec->chip_name = kstrdup(codec->preset->name, GFP_KERNEL);
630 codec->preset->name); 659 else {
631 else 660 sprintf(tmp, "ID %x", codec->vendor_id & 0xffff);
632 snprintf(name, sizeof(name), "%s ID %x", vendor, 661 codec->chip_name = kstrdup(tmp, GFP_KERNEL);
633 codec->vendor_id & 0xffff); 662 }
634 codec->name = kstrdup(name, GFP_KERNEL); 663 if (!codec->chip_name)
635 if (!codec->name)
636 return -ENOMEM; 664 return -ENOMEM;
637 return 0; 665 return 0;
638} 666}
@@ -838,7 +866,8 @@ static void snd_hda_codec_free(struct hda_codec *codec)
838 module_put(codec->owner); 866 module_put(codec->owner);
839 free_hda_cache(&codec->amp_cache); 867 free_hda_cache(&codec->amp_cache);
840 free_hda_cache(&codec->cmd_cache); 868 free_hda_cache(&codec->cmd_cache);
841 kfree(codec->name); 869 kfree(codec->vendor_name);
870 kfree(codec->chip_name);
842 kfree(codec->modelname); 871 kfree(codec->modelname);
843 kfree(codec->wcaps); 872 kfree(codec->wcaps);
844 kfree(codec); 873 kfree(codec);
@@ -979,15 +1008,16 @@ int snd_hda_codec_configure(struct hda_codec *codec)
979 int err; 1008 int err;
980 1009
981 codec->preset = find_codec_preset(codec); 1010 codec->preset = find_codec_preset(codec);
982 if (!codec->name) { 1011 if (!codec->vendor_name || !codec->chip_name) {
983 err = get_codec_name(codec); 1012 err = get_codec_name(codec);
984 if (err < 0) 1013 if (err < 0)
985 return err; 1014 return err;
986 } 1015 }
987 /* audio codec should override the mixer name */ 1016 /* audio codec should override the mixer name */
988 if (codec->afg || !*codec->bus->card->mixername) 1017 if (codec->afg || !*codec->bus->card->mixername)
989 strlcpy(codec->bus->card->mixername, codec->name, 1018 snprintf(codec->bus->card->mixername,
990 sizeof(codec->bus->card->mixername)); 1019 sizeof(codec->bus->card->mixername),
1020 "%s %s", codec->vendor_name, codec->chip_name);
991 1021
992 if (is_generic_config(codec)) { 1022 if (is_generic_config(codec)) {
993 err = snd_hda_parse_generic_codec(codec); 1023 err = snd_hda_parse_generic_codec(codec);
@@ -1055,6 +1085,8 @@ EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup_stream);
1055/* FIXME: more better hash key? */ 1085/* FIXME: more better hash key? */
1056#define HDA_HASH_KEY(nid,dir,idx) (u32)((nid) + ((idx) << 16) + ((dir) << 24)) 1086#define HDA_HASH_KEY(nid,dir,idx) (u32)((nid) + ((idx) << 16) + ((dir) << 24))
1057#define HDA_HASH_PINCAP_KEY(nid) (u32)((nid) + (0x02 << 24)) 1087#define HDA_HASH_PINCAP_KEY(nid) (u32)((nid) + (0x02 << 24))
1088#define HDA_HASH_PARPCM_KEY(nid) (u32)((nid) + (0x03 << 24))
1089#define HDA_HASH_PARSTR_KEY(nid) (u32)((nid) + (0x04 << 24))
1058#define INFO_AMP_CAPS (1<<0) 1090#define INFO_AMP_CAPS (1<<0)
1059#define INFO_AMP_VOL(ch) (1 << (1 + (ch))) 1091#define INFO_AMP_VOL(ch) (1 << (1 + (ch)))
1060 1092
@@ -1145,19 +1177,32 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
1145} 1177}
1146EXPORT_SYMBOL_HDA(snd_hda_override_amp_caps); 1178EXPORT_SYMBOL_HDA(snd_hda_override_amp_caps);
1147 1179
1148u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid) 1180static unsigned int
1181query_caps_hash(struct hda_codec *codec, hda_nid_t nid, u32 key,
1182 unsigned int (*func)(struct hda_codec *, hda_nid_t))
1149{ 1183{
1150 struct hda_amp_info *info; 1184 struct hda_amp_info *info;
1151 1185
1152 info = get_alloc_amp_hash(codec, HDA_HASH_PINCAP_KEY(nid)); 1186 info = get_alloc_amp_hash(codec, key);
1153 if (!info) 1187 if (!info)
1154 return 0; 1188 return 0;
1155 if (!info->head.val) { 1189 if (!info->head.val) {
1156 info->amp_caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
1157 info->head.val |= INFO_AMP_CAPS; 1190 info->head.val |= INFO_AMP_CAPS;
1191 info->amp_caps = func(codec, nid);
1158 } 1192 }
1159 return info->amp_caps; 1193 return info->amp_caps;
1160} 1194}
1195
1196static unsigned int read_pin_cap(struct hda_codec *codec, hda_nid_t nid)
1197{
1198 return snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
1199}
1200
1201u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid)
1202{
1203 return query_caps_hash(codec, nid, HDA_HASH_PINCAP_KEY(nid),
1204 read_pin_cap);
1205}
1161EXPORT_SYMBOL_HDA(snd_hda_query_pin_caps); 1206EXPORT_SYMBOL_HDA(snd_hda_query_pin_caps);
1162 1207
1163/* 1208/*
@@ -1432,6 +1477,8 @@ _snd_hda_find_mixer_ctl(struct hda_codec *codec,
1432 memset(&id, 0, sizeof(id)); 1477 memset(&id, 0, sizeof(id));
1433 id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1478 id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
1434 id.index = idx; 1479 id.index = idx;
1480 if (snd_BUG_ON(strlen(name) >= sizeof(id.name)))
1481 return NULL;
1435 strcpy(id.name, name); 1482 strcpy(id.name, name);
1436 return snd_ctl_find_id(codec->bus->card, &id); 1483 return snd_ctl_find_id(codec->bus->card, &id);
1437} 1484}
@@ -2242,28 +2289,22 @@ EXPORT_SYMBOL_HDA(snd_hda_create_spdif_in_ctls);
2242int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, 2289int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
2243 int direct, unsigned int verb, unsigned int parm) 2290 int direct, unsigned int verb, unsigned int parm)
2244{ 2291{
2245 struct hda_bus *bus = codec->bus; 2292 int err = snd_hda_codec_write(codec, nid, direct, verb, parm);
2246 unsigned int res; 2293 struct hda_cache_head *c;
2247 int err; 2294 u32 key;
2248 2295
2249 res = make_codec_cmd(codec, nid, direct, verb, parm); 2296 if (err < 0)
2250 snd_hda_power_up(codec); 2297 return err;
2251 mutex_lock(&bus->cmd_mutex); 2298 /* parm may contain the verb stuff for get/set amp */
2252 err = bus->ops.command(bus, res); 2299 verb = verb | (parm >> 8);
2253 if (!err) { 2300 parm &= 0xff;
2254 struct hda_cache_head *c; 2301 key = build_cmd_cache_key(nid, verb);
2255 u32 key; 2302 mutex_lock(&codec->bus->cmd_mutex);
2256 /* parm may contain the verb stuff for get/set amp */ 2303 c = get_alloc_hash(&codec->cmd_cache, key);
2257 verb = verb | (parm >> 8); 2304 if (c)
2258 parm &= 0xff; 2305 c->val = parm;
2259 key = build_cmd_cache_key(nid, verb); 2306 mutex_unlock(&codec->bus->cmd_mutex);
2260 c = get_alloc_hash(&codec->cmd_cache, key); 2307 return 0;
2261 if (c)
2262 c->val = parm;
2263 }
2264 mutex_unlock(&bus->cmd_mutex);
2265 snd_hda_power_down(codec);
2266 return err;
2267} 2308}
2268EXPORT_SYMBOL_HDA(snd_hda_codec_write_cache); 2309EXPORT_SYMBOL_HDA(snd_hda_codec_write_cache);
2269 2310
@@ -2321,7 +2362,8 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
2321 if (wcaps & AC_WCAP_POWER) { 2362 if (wcaps & AC_WCAP_POWER) {
2322 unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >> 2363 unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >>
2323 AC_WCAP_TYPE_SHIFT; 2364 AC_WCAP_TYPE_SHIFT;
2324 if (wid_type == AC_WID_PIN) { 2365 if (power_state == AC_PWRST_D3 &&
2366 wid_type == AC_WID_PIN) {
2325 unsigned int pincap; 2367 unsigned int pincap;
2326 /* 2368 /*
2327 * don't power down the widget if it controls 2369 * don't power down the widget if it controls
@@ -2333,7 +2375,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
2333 nid, 0, 2375 nid, 0,
2334 AC_VERB_GET_EAPD_BTLENABLE, 0); 2376 AC_VERB_GET_EAPD_BTLENABLE, 0);
2335 eapd &= 0x02; 2377 eapd &= 0x02;
2336 if (power_state == AC_PWRST_D3 && eapd) 2378 if (eapd)
2337 continue; 2379 continue;
2338 } 2380 }
2339 } 2381 }
@@ -2544,6 +2586,41 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
2544} 2586}
2545EXPORT_SYMBOL_HDA(snd_hda_calc_stream_format); 2587EXPORT_SYMBOL_HDA(snd_hda_calc_stream_format);
2546 2588
2589static unsigned int get_pcm_param(struct hda_codec *codec, hda_nid_t nid)
2590{
2591 unsigned int val = 0;
2592 if (nid != codec->afg &&
2593 (get_wcaps(codec, nid) & AC_WCAP_FORMAT_OVRD))
2594 val = snd_hda_param_read(codec, nid, AC_PAR_PCM);
2595 if (!val || val == -1)
2596 val = snd_hda_param_read(codec, codec->afg, AC_PAR_PCM);
2597 if (!val || val == -1)
2598 return 0;
2599 return val;
2600}
2601
2602static unsigned int query_pcm_param(struct hda_codec *codec, hda_nid_t nid)
2603{
2604 return query_caps_hash(codec, nid, HDA_HASH_PARPCM_KEY(nid),
2605 get_pcm_param);
2606}
2607
2608static unsigned int get_stream_param(struct hda_codec *codec, hda_nid_t nid)
2609{
2610 unsigned int streams = snd_hda_param_read(codec, nid, AC_PAR_STREAM);
2611 if (!streams || streams == -1)
2612 streams = snd_hda_param_read(codec, codec->afg, AC_PAR_STREAM);
2613 if (!streams || streams == -1)
2614 return 0;
2615 return streams;
2616}
2617
2618static unsigned int query_stream_param(struct hda_codec *codec, hda_nid_t nid)
2619{
2620 return query_caps_hash(codec, nid, HDA_HASH_PARSTR_KEY(nid),
2621 get_stream_param);
2622}
2623
2547/** 2624/**
2548 * snd_hda_query_supported_pcm - query the supported PCM rates and formats 2625 * snd_hda_query_supported_pcm - query the supported PCM rates and formats
2549 * @codec: the HDA codec 2626 * @codec: the HDA codec
@@ -2562,15 +2639,8 @@ static int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
2562{ 2639{
2563 unsigned int i, val, wcaps; 2640 unsigned int i, val, wcaps;
2564 2641
2565 val = 0;
2566 wcaps = get_wcaps(codec, nid); 2642 wcaps = get_wcaps(codec, nid);
2567 if (nid != codec->afg && (wcaps & AC_WCAP_FORMAT_OVRD)) { 2643 val = query_pcm_param(codec, nid);
2568 val = snd_hda_param_read(codec, nid, AC_PAR_PCM);
2569 if (val == -1)
2570 return -EIO;
2571 }
2572 if (!val)
2573 val = snd_hda_param_read(codec, codec->afg, AC_PAR_PCM);
2574 2644
2575 if (ratesp) { 2645 if (ratesp) {
2576 u32 rates = 0; 2646 u32 rates = 0;
@@ -2592,15 +2662,9 @@ static int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
2592 u64 formats = 0; 2662 u64 formats = 0;
2593 unsigned int streams, bps; 2663 unsigned int streams, bps;
2594 2664
2595 streams = snd_hda_param_read(codec, nid, AC_PAR_STREAM); 2665 streams = query_stream_param(codec, nid);
2596 if (streams == -1) 2666 if (!streams)
2597 return -EIO; 2667 return -EIO;
2598 if (!streams) {
2599 streams = snd_hda_param_read(codec, codec->afg,
2600 AC_PAR_STREAM);
2601 if (streams == -1)
2602 return -EIO;
2603 }
2604 2668
2605 bps = 0; 2669 bps = 0;
2606 if (streams & AC_SUPFMT_PCM) { 2670 if (streams & AC_SUPFMT_PCM) {
@@ -2674,17 +2738,9 @@ int snd_hda_is_supported_format(struct hda_codec *codec, hda_nid_t nid,
2674 int i; 2738 int i;
2675 unsigned int val = 0, rate, stream; 2739 unsigned int val = 0, rate, stream;
2676 2740
2677 if (nid != codec->afg && 2741 val = query_pcm_param(codec, nid);
2678 (get_wcaps(codec, nid) & AC_WCAP_FORMAT_OVRD)) { 2742 if (!val)
2679 val = snd_hda_param_read(codec, nid, AC_PAR_PCM); 2743 return 0;
2680 if (val == -1)
2681 return 0;
2682 }
2683 if (!val) {
2684 val = snd_hda_param_read(codec, codec->afg, AC_PAR_PCM);
2685 if (val == -1)
2686 return 0;
2687 }
2688 2744
2689 rate = format & 0xff00; 2745 rate = format & 0xff00;
2690 for (i = 0; i < AC_PAR_PCM_RATE_BITS; i++) 2746 for (i = 0; i < AC_PAR_PCM_RATE_BITS; i++)
@@ -2696,12 +2752,8 @@ int snd_hda_is_supported_format(struct hda_codec *codec, hda_nid_t nid,
2696 if (i >= AC_PAR_PCM_RATE_BITS) 2752 if (i >= AC_PAR_PCM_RATE_BITS)
2697 return 0; 2753 return 0;
2698 2754
2699 stream = snd_hda_param_read(codec, nid, AC_PAR_STREAM); 2755 stream = query_stream_param(codec, nid);
2700 if (stream == -1) 2756 if (!stream)
2701 return 0;
2702 if (!stream && nid != codec->afg)
2703 stream = snd_hda_param_read(codec, codec->afg, AC_PAR_STREAM);
2704 if (!stream || stream == -1)
2705 return 0; 2757 return 0;
2706 2758
2707 if (stream & AC_SUPFMT_PCM) { 2759 if (stream & AC_SUPFMT_PCM) {
@@ -3835,11 +3887,10 @@ EXPORT_SYMBOL_HDA(auto_pin_cfg_labels);
3835/** 3887/**
3836 * snd_hda_suspend - suspend the codecs 3888 * snd_hda_suspend - suspend the codecs
3837 * @bus: the HDA bus 3889 * @bus: the HDA bus
3838 * @state: suspsend state
3839 * 3890 *
3840 * Returns 0 if successful. 3891 * Returns 0 if successful.
3841 */ 3892 */
3842int snd_hda_suspend(struct hda_bus *bus, pm_message_t state) 3893int snd_hda_suspend(struct hda_bus *bus)
3843{ 3894{
3844 struct hda_codec *codec; 3895 struct hda_codec *codec;
3845 3896
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 2fdecf4b0eb6..cad79efaabc9 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -574,6 +574,8 @@ struct hda_bus_ops {
574 /* attach a PCM stream */ 574 /* attach a PCM stream */
575 int (*attach_pcm)(struct hda_bus *bus, struct hda_codec *codec, 575 int (*attach_pcm)(struct hda_bus *bus, struct hda_codec *codec,
576 struct hda_pcm *pcm); 576 struct hda_pcm *pcm);
577 /* reset bus for retry verb */
578 void (*bus_reset)(struct hda_bus *bus);
577#ifdef CONFIG_SND_HDA_POWER_SAVE 579#ifdef CONFIG_SND_HDA_POWER_SAVE
578 /* notify power-up/down from codec to controller */ 580 /* notify power-up/down from codec to controller */
579 void (*pm_notify)(struct hda_bus *bus); 581 void (*pm_notify)(struct hda_bus *bus);
@@ -622,7 +624,13 @@ struct hda_bus {
622 624
623 /* misc op flags */ 625 /* misc op flags */
624 unsigned int needs_damn_long_delay :1; 626 unsigned int needs_damn_long_delay :1;
627 unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */
628 unsigned int sync_write:1; /* sync after verb write */
629 /* status for codec/controller */
625 unsigned int shutdown :1; /* being unloaded */ 630 unsigned int shutdown :1; /* being unloaded */
631 unsigned int rirb_error:1; /* error in codec communication */
632 unsigned int response_reset:1; /* controller was reset */
633 unsigned int in_reset:1; /* during reset operation */
626}; 634};
627 635
628/* 636/*
@@ -747,7 +755,8 @@ struct hda_codec {
747 /* detected preset */ 755 /* detected preset */
748 const struct hda_codec_preset *preset; 756 const struct hda_codec_preset *preset;
749 struct module *owner; 757 struct module *owner;
750 const char *name; /* codec name */ 758 const char *vendor_name; /* codec vendor name */
759 const char *chip_name; /* codec chip name */
751 const char *modelname; /* model name for preset */ 760 const char *modelname; /* model name for preset */
752 761
753 /* set by patch */ 762 /* set by patch */
@@ -905,7 +914,7 @@ void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen);
905 * power management 914 * power management
906 */ 915 */
907#ifdef CONFIG_PM 916#ifdef CONFIG_PM
908int snd_hda_suspend(struct hda_bus *bus, pm_message_t state); 917int snd_hda_suspend(struct hda_bus *bus);
909int snd_hda_resume(struct hda_bus *bus); 918int snd_hda_resume(struct hda_bus *bus);
910#endif 919#endif
911 920
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 1c57505c2874..6812fbe80fa4 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -242,7 +242,8 @@ CODEC_INFO_SHOW(subsystem_id);
242CODEC_INFO_SHOW(revision_id); 242CODEC_INFO_SHOW(revision_id);
243CODEC_INFO_SHOW(afg); 243CODEC_INFO_SHOW(afg);
244CODEC_INFO_SHOW(mfg); 244CODEC_INFO_SHOW(mfg);
245CODEC_INFO_STR_SHOW(name); 245CODEC_INFO_STR_SHOW(vendor_name);
246CODEC_INFO_STR_SHOW(chip_name);
246CODEC_INFO_STR_SHOW(modelname); 247CODEC_INFO_STR_SHOW(modelname);
247 248
248#define CODEC_INFO_STORE(type) \ 249#define CODEC_INFO_STORE(type) \
@@ -275,7 +276,8 @@ static ssize_t type##_store(struct device *dev, \
275CODEC_INFO_STORE(vendor_id); 276CODEC_INFO_STORE(vendor_id);
276CODEC_INFO_STORE(subsystem_id); 277CODEC_INFO_STORE(subsystem_id);
277CODEC_INFO_STORE(revision_id); 278CODEC_INFO_STORE(revision_id);
278CODEC_INFO_STR_STORE(name); 279CODEC_INFO_STR_STORE(vendor_name);
280CODEC_INFO_STR_STORE(chip_name);
279CODEC_INFO_STR_STORE(modelname); 281CODEC_INFO_STR_STORE(modelname);
280 282
281#define CODEC_ACTION_STORE(type) \ 283#define CODEC_ACTION_STORE(type) \
@@ -499,7 +501,8 @@ static struct device_attribute codec_attrs[] = {
499 CODEC_ATTR_RW(revision_id), 501 CODEC_ATTR_RW(revision_id),
500 CODEC_ATTR_RO(afg), 502 CODEC_ATTR_RO(afg),
501 CODEC_ATTR_RO(mfg), 503 CODEC_ATTR_RO(mfg),
502 CODEC_ATTR_RW(name), 504 CODEC_ATTR_RW(vendor_name),
505 CODEC_ATTR_RW(chip_name),
503 CODEC_ATTR_RW(modelname), 506 CODEC_ATTR_RW(modelname),
504 CODEC_ATTR_RW(init_verbs), 507 CODEC_ATTR_RW(init_verbs),
505 CODEC_ATTR_RW(hints), 508 CODEC_ATTR_RW(hints),
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3128e1a6bc65..4e9ea7080270 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -128,21 +128,33 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
128 "{ULI, M5461}}"); 128 "{ULI, M5461}}");
129MODULE_DESCRIPTION("Intel HDA driver"); 129MODULE_DESCRIPTION("Intel HDA driver");
130 130
131#ifdef CONFIG_SND_VERBOSE_PRINTK
132#define SFX /* nop */
133#else
131#define SFX "hda-intel: " 134#define SFX "hda-intel: "
132 135#endif
133 136
134/* 137/*
135 * registers 138 * registers
136 */ 139 */
137#define ICH6_REG_GCAP 0x00 140#define ICH6_REG_GCAP 0x00
141#define ICH6_GCAP_64OK (1 << 0) /* 64bit address support */
142#define ICH6_GCAP_NSDO (3 << 1) /* # of serial data out signals */
143#define ICH6_GCAP_BSS (31 << 3) /* # of bidirectional streams */
144#define ICH6_GCAP_ISS (15 << 8) /* # of input streams */
145#define ICH6_GCAP_OSS (15 << 12) /* # of output streams */
138#define ICH6_REG_VMIN 0x02 146#define ICH6_REG_VMIN 0x02
139#define ICH6_REG_VMAJ 0x03 147#define ICH6_REG_VMAJ 0x03
140#define ICH6_REG_OUTPAY 0x04 148#define ICH6_REG_OUTPAY 0x04
141#define ICH6_REG_INPAY 0x06 149#define ICH6_REG_INPAY 0x06
142#define ICH6_REG_GCTL 0x08 150#define ICH6_REG_GCTL 0x08
151#define ICH6_GCTL_RESET (1 << 0) /* controller reset */
152#define ICH6_GCTL_FCNTRL (1 << 1) /* flush control */
153#define ICH6_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
143#define ICH6_REG_WAKEEN 0x0c 154#define ICH6_REG_WAKEEN 0x0c
144#define ICH6_REG_STATESTS 0x0e 155#define ICH6_REG_STATESTS 0x0e
145#define ICH6_REG_GSTS 0x10 156#define ICH6_REG_GSTS 0x10
157#define ICH6_GSTS_FSTS (1 << 1) /* flush status */
146#define ICH6_REG_INTCTL 0x20 158#define ICH6_REG_INTCTL 0x20
147#define ICH6_REG_INTSTS 0x24 159#define ICH6_REG_INTSTS 0x24
148#define ICH6_REG_WALCLK 0x30 160#define ICH6_REG_WALCLK 0x30
@@ -150,17 +162,27 @@ MODULE_DESCRIPTION("Intel HDA driver");
150#define ICH6_REG_CORBLBASE 0x40 162#define ICH6_REG_CORBLBASE 0x40
151#define ICH6_REG_CORBUBASE 0x44 163#define ICH6_REG_CORBUBASE 0x44
152#define ICH6_REG_CORBWP 0x48 164#define ICH6_REG_CORBWP 0x48
153#define ICH6_REG_CORBRP 0x4A 165#define ICH6_REG_CORBRP 0x4a
166#define ICH6_CORBRP_RST (1 << 15) /* read pointer reset */
154#define ICH6_REG_CORBCTL 0x4c 167#define ICH6_REG_CORBCTL 0x4c
168#define ICH6_CORBCTL_RUN (1 << 1) /* enable DMA */
169#define ICH6_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
155#define ICH6_REG_CORBSTS 0x4d 170#define ICH6_REG_CORBSTS 0x4d
171#define ICH6_CORBSTS_CMEI (1 << 0) /* memory error indication */
156#define ICH6_REG_CORBSIZE 0x4e 172#define ICH6_REG_CORBSIZE 0x4e
157 173
158#define ICH6_REG_RIRBLBASE 0x50 174#define ICH6_REG_RIRBLBASE 0x50
159#define ICH6_REG_RIRBUBASE 0x54 175#define ICH6_REG_RIRBUBASE 0x54
160#define ICH6_REG_RIRBWP 0x58 176#define ICH6_REG_RIRBWP 0x58
177#define ICH6_RIRBWP_RST (1 << 15) /* write pointer reset */
161#define ICH6_REG_RINTCNT 0x5a 178#define ICH6_REG_RINTCNT 0x5a
162#define ICH6_REG_RIRBCTL 0x5c 179#define ICH6_REG_RIRBCTL 0x5c
180#define ICH6_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
181#define ICH6_RBCTL_DMA_EN (1 << 1) /* enable DMA */
182#define ICH6_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
163#define ICH6_REG_RIRBSTS 0x5d 183#define ICH6_REG_RIRBSTS 0x5d
184#define ICH6_RBSTS_IRQ (1 << 0) /* response irq */
185#define ICH6_RBSTS_OVERRUN (1 << 2) /* overrun irq */
164#define ICH6_REG_RIRBSIZE 0x5e 186#define ICH6_REG_RIRBSIZE 0x5e
165 187
166#define ICH6_REG_IC 0x60 188#define ICH6_REG_IC 0x60
@@ -257,16 +279,6 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
257#define ICH6_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */ 279#define ICH6_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
258#define ICH6_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */ 280#define ICH6_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
259 281
260/* GCTL unsolicited response enable bit */
261#define ICH6_GCTL_UREN (1<<8)
262
263/* GCTL reset bit */
264#define ICH6_GCTL_RESET (1<<0)
265
266/* CORB/RIRB control, read/write pointer */
267#define ICH6_RBCTL_DMA_EN 0x02 /* enable DMA */
268#define ICH6_RBCTL_IRQ_EN 0x01 /* enable IRQ */
269#define ICH6_RBRWP_CLR 0x8000 /* read/write pointer clear */
270/* below are so far hardcoded - should read registers in future */ 282/* below are so far hardcoded - should read registers in future */
271#define ICH6_MAX_CORB_ENTRIES 256 283#define ICH6_MAX_CORB_ENTRIES 256
272#define ICH6_MAX_RIRB_ENTRIES 256 284#define ICH6_MAX_RIRB_ENTRIES 256
@@ -512,25 +524,25 @@ static void azx_init_cmd_io(struct azx *chip)
512 /* set the corb write pointer to 0 */ 524 /* set the corb write pointer to 0 */
513 azx_writew(chip, CORBWP, 0); 525 azx_writew(chip, CORBWP, 0);
514 /* reset the corb hw read pointer */ 526 /* reset the corb hw read pointer */
515 azx_writew(chip, CORBRP, ICH6_RBRWP_CLR); 527 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
516 /* enable corb dma */ 528 /* enable corb dma */
517 azx_writeb(chip, CORBCTL, ICH6_RBCTL_DMA_EN); 529 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
518 530
519 /* RIRB set up */ 531 /* RIRB set up */
520 chip->rirb.addr = chip->rb.addr + 2048; 532 chip->rirb.addr = chip->rb.addr + 2048;
521 chip->rirb.buf = (u32 *)(chip->rb.area + 2048); 533 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
534 chip->rirb.wp = chip->rirb.rp = chip->rirb.cmds = 0;
522 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr); 535 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
523 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr)); 536 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
524 537
525 /* set the rirb size to 256 entries (ULI requires explicitly) */ 538 /* set the rirb size to 256 entries (ULI requires explicitly) */
526 azx_writeb(chip, RIRBSIZE, 0x02); 539 azx_writeb(chip, RIRBSIZE, 0x02);
527 /* reset the rirb hw write pointer */ 540 /* reset the rirb hw write pointer */
528 azx_writew(chip, RIRBWP, ICH6_RBRWP_CLR); 541 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
529 /* set N=1, get RIRB response interrupt for new entry */ 542 /* set N=1, get RIRB response interrupt for new entry */
530 azx_writew(chip, RINTCNT, 1); 543 azx_writew(chip, RINTCNT, 1);
531 /* enable rirb dma and response irq */ 544 /* enable rirb dma and response irq */
532 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN); 545 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
533 chip->rirb.rp = chip->rirb.cmds = 0;
534} 546}
535 547
536static void azx_free_cmd_io(struct azx *chip) 548static void azx_free_cmd_io(struct azx *chip)
@@ -606,6 +618,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus)
606 } 618 }
607 if (!chip->rirb.cmds) { 619 if (!chip->rirb.cmds) {
608 smp_rmb(); 620 smp_rmb();
621 bus->rirb_error = 0;
609 return chip->rirb.res; /* the last value */ 622 return chip->rirb.res; /* the last value */
610 } 623 }
611 if (time_after(jiffies, timeout)) 624 if (time_after(jiffies, timeout))
@@ -619,19 +632,21 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus)
619 } 632 }
620 633
621 if (chip->msi) { 634 if (chip->msi) {
622 snd_printk(KERN_WARNING "hda_intel: No response from codec, " 635 snd_printk(KERN_WARNING SFX "No response from codec, "
623 "disabling MSI: last cmd=0x%08x\n", chip->last_cmd); 636 "disabling MSI: last cmd=0x%08x\n", chip->last_cmd);
624 free_irq(chip->irq, chip); 637 free_irq(chip->irq, chip);
625 chip->irq = -1; 638 chip->irq = -1;
626 pci_disable_msi(chip->pci); 639 pci_disable_msi(chip->pci);
627 chip->msi = 0; 640 chip->msi = 0;
628 if (azx_acquire_irq(chip, 1) < 0) 641 if (azx_acquire_irq(chip, 1) < 0) {
642 bus->rirb_error = 1;
629 return -1; 643 return -1;
644 }
630 goto again; 645 goto again;
631 } 646 }
632 647
633 if (!chip->polling_mode) { 648 if (!chip->polling_mode) {
634 snd_printk(KERN_WARNING "hda_intel: azx_get_response timeout, " 649 snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
635 "switching to polling mode: last cmd=0x%08x\n", 650 "switching to polling mode: last cmd=0x%08x\n",
636 chip->last_cmd); 651 chip->last_cmd);
637 chip->polling_mode = 1; 652 chip->polling_mode = 1;
@@ -646,14 +661,23 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus)
646 return -1; 661 return -1;
647 } 662 }
648 663
664 /* a fatal communication error; need either to reset or to fallback
665 * to the single_cmd mode
666 */
667 bus->rirb_error = 1;
668 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
669 bus->response_reset = 1;
670 return -1; /* give a chance to retry */
671 }
672
649 snd_printk(KERN_ERR "hda_intel: azx_get_response timeout, " 673 snd_printk(KERN_ERR "hda_intel: azx_get_response timeout, "
650 "switching to single_cmd mode: last cmd=0x%08x\n", 674 "switching to single_cmd mode: last cmd=0x%08x\n",
651 chip->last_cmd); 675 chip->last_cmd);
652 chip->rirb.rp = azx_readb(chip, RIRBWP);
653 chip->rirb.cmds = 0;
654 /* switch to single_cmd mode */
655 chip->single_cmd = 1; 676 chip->single_cmd = 1;
677 bus->response_reset = 0;
678 /* re-initialize CORB/RIRB */
656 azx_free_cmd_io(chip); 679 azx_free_cmd_io(chip);
680 azx_init_cmd_io(chip);
657 return -1; 681 return -1;
658} 682}
659 683
@@ -667,12 +691,34 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus)
667 * I left the codes, however, for debugging/testing purposes. 691 * I left the codes, however, for debugging/testing purposes.
668 */ 692 */
669 693
694/* receive a response */
695static int azx_single_wait_for_response(struct azx *chip)
696{
697 int timeout = 50;
698
699 while (timeout--) {
700 /* check IRV busy bit */
701 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
702 /* reuse rirb.res as the response return value */
703 chip->rirb.res = azx_readl(chip, IR);
704 return 0;
705 }
706 udelay(1);
707 }
708 if (printk_ratelimit())
709 snd_printd(SFX "get_response timeout: IRS=0x%x\n",
710 azx_readw(chip, IRS));
711 chip->rirb.res = -1;
712 return -EIO;
713}
714
670/* send a command */ 715/* send a command */
671static int azx_single_send_cmd(struct hda_bus *bus, u32 val) 716static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
672{ 717{
673 struct azx *chip = bus->private_data; 718 struct azx *chip = bus->private_data;
674 int timeout = 50; 719 int timeout = 50;
675 720
721 bus->rirb_error = 0;
676 while (timeout--) { 722 while (timeout--) {
677 /* check ICB busy bit */ 723 /* check ICB busy bit */
678 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) { 724 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
@@ -682,7 +728,7 @@ static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
682 azx_writel(chip, IC, val); 728 azx_writel(chip, IC, val);
683 azx_writew(chip, IRS, azx_readw(chip, IRS) | 729 azx_writew(chip, IRS, azx_readw(chip, IRS) |
684 ICH6_IRS_BUSY); 730 ICH6_IRS_BUSY);
685 return 0; 731 return azx_single_wait_for_response(chip);
686 } 732 }
687 udelay(1); 733 udelay(1);
688 } 734 }
@@ -696,18 +742,7 @@ static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
696static unsigned int azx_single_get_response(struct hda_bus *bus) 742static unsigned int azx_single_get_response(struct hda_bus *bus)
697{ 743{
698 struct azx *chip = bus->private_data; 744 struct azx *chip = bus->private_data;
699 int timeout = 50; 745 return chip->rirb.res;
700
701 while (timeout--) {
702 /* check IRV busy bit */
703 if (azx_readw(chip, IRS) & ICH6_IRS_VALID)
704 return azx_readl(chip, IR);
705 udelay(1);
706 }
707 if (printk_ratelimit())
708 snd_printd(SFX "get_response timeout: IRS=0x%x\n",
709 azx_readw(chip, IRS));
710 return (unsigned int)-1;
711} 746}
712 747
713/* 748/*
@@ -775,17 +810,17 @@ static int azx_reset(struct azx *chip)
775 810
776 /* check to see if controller is ready */ 811 /* check to see if controller is ready */
777 if (!azx_readb(chip, GCTL)) { 812 if (!azx_readb(chip, GCTL)) {
778 snd_printd("azx_reset: controller not ready!\n"); 813 snd_printd(SFX "azx_reset: controller not ready!\n");
779 return -EBUSY; 814 return -EBUSY;
780 } 815 }
781 816
782 /* Accept unsolicited responses */ 817 /* Accept unsolicited responses */
783 azx_writel(chip, GCTL, azx_readl(chip, GCTL) | ICH6_GCTL_UREN); 818 azx_writel(chip, GCTL, azx_readl(chip, GCTL) | ICH6_GCTL_UNSOL);
784 819
785 /* detect codecs */ 820 /* detect codecs */
786 if (!chip->codec_mask) { 821 if (!chip->codec_mask) {
787 chip->codec_mask = azx_readw(chip, STATESTS); 822 chip->codec_mask = azx_readw(chip, STATESTS);
788 snd_printdd("codec_mask = 0x%x\n", chip->codec_mask); 823 snd_printdd(SFX "codec_mask = 0x%x\n", chip->codec_mask);
789 } 824 }
790 825
791 return 0; 826 return 0;
@@ -895,8 +930,7 @@ static void azx_init_chip(struct azx *chip)
895 azx_int_enable(chip); 930 azx_int_enable(chip);
896 931
897 /* initialize the codec command I/O */ 932 /* initialize the codec command I/O */
898 if (!chip->single_cmd) 933 azx_init_cmd_io(chip);
899 azx_init_cmd_io(chip);
900 934
901 /* program the position buffer */ 935 /* program the position buffer */
902 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr); 936 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
@@ -953,12 +987,12 @@ static void azx_init_pci(struct azx *chip)
953 case AZX_DRIVER_SCH: 987 case AZX_DRIVER_SCH:
954 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); 988 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
955 if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { 989 if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
956 pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, \ 990 pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
957 snoop & (~INTEL_SCH_HDA_DEVC_NOSNOOP)); 991 snoop & (~INTEL_SCH_HDA_DEVC_NOSNOOP));
958 pci_read_config_word(chip->pci, 992 pci_read_config_word(chip->pci,
959 INTEL_SCH_HDA_DEVC, &snoop); 993 INTEL_SCH_HDA_DEVC, &snoop);
960 snd_printdd("HDA snoop disabled, enabling ... %s\n",\ 994 snd_printdd(SFX "HDA snoop disabled, enabling ... %s\n",
961 (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) \ 995 (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
962 ? "Failed" : "OK"); 996 ? "Failed" : "OK");
963 } 997 }
964 break; 998 break;
@@ -1012,7 +1046,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1012 /* clear rirb int */ 1046 /* clear rirb int */
1013 status = azx_readb(chip, RIRBSTS); 1047 status = azx_readb(chip, RIRBSTS);
1014 if (status & RIRB_INT_MASK) { 1048 if (status & RIRB_INT_MASK) {
1015 if (!chip->single_cmd && (status & RIRB_INT_RESPONSE)) 1049 if (status & RIRB_INT_RESPONSE)
1016 azx_update_rirb(chip); 1050 azx_update_rirb(chip);
1017 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); 1051 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1018 } 1052 }
@@ -1098,7 +1132,7 @@ static int azx_setup_periods(struct azx *chip,
1098 pos_align; 1132 pos_align;
1099 pos_adj = frames_to_bytes(runtime, pos_adj); 1133 pos_adj = frames_to_bytes(runtime, pos_adj);
1100 if (pos_adj >= period_bytes) { 1134 if (pos_adj >= period_bytes) {
1101 snd_printk(KERN_WARNING "Too big adjustment %d\n", 1135 snd_printk(KERN_WARNING SFX "Too big adjustment %d\n",
1102 bdl_pos_adj[chip->dev_index]); 1136 bdl_pos_adj[chip->dev_index]);
1103 pos_adj = 0; 1137 pos_adj = 0;
1104 } else { 1138 } else {
@@ -1122,7 +1156,7 @@ static int azx_setup_periods(struct azx *chip,
1122 return 0; 1156 return 0;
1123 1157
1124 error: 1158 error:
1125 snd_printk(KERN_ERR "Too many BDL entries: buffer=%d, period=%d\n", 1159 snd_printk(KERN_ERR SFX "Too many BDL entries: buffer=%d, period=%d\n",
1126 azx_dev->bufsize, period_bytes); 1160 azx_dev->bufsize, period_bytes);
1127 return -EINVAL; 1161 return -EINVAL;
1128} 1162}
@@ -1215,7 +1249,7 @@ static int probe_codec(struct azx *chip, int addr)
1215 chip->probing = 0; 1249 chip->probing = 0;
1216 if (res == -1) 1250 if (res == -1)
1217 return -EIO; 1251 return -EIO;
1218 snd_printdd("hda_intel: codec #%d probed OK\n", addr); 1252 snd_printdd(SFX "codec #%d probed OK\n", addr);
1219 return 0; 1253 return 0;
1220} 1254}
1221 1255
@@ -1223,6 +1257,26 @@ static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
1223 struct hda_pcm *cpcm); 1257 struct hda_pcm *cpcm);
1224static void azx_stop_chip(struct azx *chip); 1258static void azx_stop_chip(struct azx *chip);
1225 1259
1260static void azx_bus_reset(struct hda_bus *bus)
1261{
1262 struct azx *chip = bus->private_data;
1263
1264 bus->in_reset = 1;
1265 azx_stop_chip(chip);
1266 azx_init_chip(chip);
1267#ifdef CONFIG_PM
1268 if (chip->initialized) {
1269 int i;
1270
1271 for (i = 0; i < AZX_MAX_PCMS; i++)
1272 snd_pcm_suspend_all(chip->pcm[i]);
1273 snd_hda_suspend(chip->bus);
1274 snd_hda_resume(chip->bus);
1275 }
1276#endif
1277 bus->in_reset = 0;
1278}
1279
1226/* 1280/*
1227 * Codec initialization 1281 * Codec initialization
1228 */ 1282 */
@@ -1246,6 +1300,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
1246 bus_temp.ops.command = azx_send_cmd; 1300 bus_temp.ops.command = azx_send_cmd;
1247 bus_temp.ops.get_response = azx_get_response; 1301 bus_temp.ops.get_response = azx_get_response;
1248 bus_temp.ops.attach_pcm = azx_attach_pcm_stream; 1302 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1303 bus_temp.ops.bus_reset = azx_bus_reset;
1249#ifdef CONFIG_SND_HDA_POWER_SAVE 1304#ifdef CONFIG_SND_HDA_POWER_SAVE
1250 bus_temp.power_save = &power_save; 1305 bus_temp.power_save = &power_save;
1251 bus_temp.ops.pm_notify = azx_power_notify; 1306 bus_temp.ops.pm_notify = azx_power_notify;
@@ -1270,8 +1325,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
1270 /* Some BIOSen give you wrong codec addresses 1325 /* Some BIOSen give you wrong codec addresses
1271 * that don't exist 1326 * that don't exist
1272 */ 1327 */
1273 snd_printk(KERN_WARNING 1328 snd_printk(KERN_WARNING SFX
1274 "hda_intel: Codec #%d probe error; " 1329 "Codec #%d probe error; "
1275 "disabling it...\n", c); 1330 "disabling it...\n", c);
1276 chip->codec_mask &= ~(1 << c); 1331 chip->codec_mask &= ~(1 << c);
1277 /* More badly, accessing to a non-existing 1332 /* More badly, accessing to a non-existing
@@ -1487,7 +1542,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1487 bufsize = snd_pcm_lib_buffer_bytes(substream); 1542 bufsize = snd_pcm_lib_buffer_bytes(substream);
1488 period_bytes = snd_pcm_lib_period_bytes(substream); 1543 period_bytes = snd_pcm_lib_period_bytes(substream);
1489 1544
1490 snd_printdd("azx_pcm_prepare: bufsize=0x%x, format=0x%x\n", 1545 snd_printdd(SFX "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
1491 bufsize, format_val); 1546 bufsize, format_val);
1492 1547
1493 if (bufsize != azx_dev->bufsize || 1548 if (bufsize != azx_dev->bufsize ||
@@ -1830,7 +1885,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
1830 &pcm); 1885 &pcm);
1831 if (err < 0) 1886 if (err < 0)
1832 return err; 1887 return err;
1833 strcpy(pcm->name, cpcm->name); 1888 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
1834 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); 1889 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
1835 if (apcm == NULL) 1890 if (apcm == NULL)
1836 return -ENOMEM; 1891 return -ENOMEM;
@@ -1973,7 +2028,7 @@ static int azx_suspend(struct pci_dev *pci, pm_message_t state)
1973 for (i = 0; i < AZX_MAX_PCMS; i++) 2028 for (i = 0; i < AZX_MAX_PCMS; i++)
1974 snd_pcm_suspend_all(chip->pcm[i]); 2029 snd_pcm_suspend_all(chip->pcm[i]);
1975 if (chip->initialized) 2030 if (chip->initialized)
1976 snd_hda_suspend(chip->bus, state); 2031 snd_hda_suspend(chip->bus);
1977 azx_stop_chip(chip); 2032 azx_stop_chip(chip);
1978 if (chip->irq >= 0) { 2033 if (chip->irq >= 0) {
1979 free_irq(chip->irq, chip); 2034 free_irq(chip->irq, chip);
@@ -2265,14 +2320,14 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2265 synchronize_irq(chip->irq); 2320 synchronize_irq(chip->irq);
2266 2321
2267 gcap = azx_readw(chip, GCAP); 2322 gcap = azx_readw(chip, GCAP);
2268 snd_printdd("chipset global capabilities = 0x%x\n", gcap); 2323 snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
2269 2324
2270 /* ATI chips seems buggy about 64bit DMA addresses */ 2325 /* ATI chips seems buggy about 64bit DMA addresses */
2271 if (chip->driver_type == AZX_DRIVER_ATI) 2326 if (chip->driver_type == AZX_DRIVER_ATI)
2272 gcap &= ~0x01; 2327 gcap &= ~ICH6_GCAP_64OK;
2273 2328
2274 /* allow 64bit DMA address if supported by H/W */ 2329 /* allow 64bit DMA address if supported by H/W */
2275 if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) 2330 if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
2276 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); 2331 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
2277 else { 2332 else {
2278 pci_set_dma_mask(pci, DMA_BIT_MASK(32)); 2333 pci_set_dma_mask(pci, DMA_BIT_MASK(32));
@@ -2309,7 +2364,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2309 chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev), 2364 chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev),
2310 GFP_KERNEL); 2365 GFP_KERNEL);
2311 if (!chip->azx_dev) { 2366 if (!chip->azx_dev) {
2312 snd_printk(KERN_ERR "cannot malloc azx_dev\n"); 2367 snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
2313 goto errout; 2368 goto errout;
2314 } 2369 }
2315 2370
@@ -2332,11 +2387,9 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2332 goto errout; 2387 goto errout;
2333 } 2388 }
2334 /* allocate CORB/RIRB */ 2389 /* allocate CORB/RIRB */
2335 if (!chip->single_cmd) { 2390 err = azx_alloc_cmd_io(chip);
2336 err = azx_alloc_cmd_io(chip); 2391 if (err < 0)
2337 if (err < 0) 2392 goto errout;
2338 goto errout;
2339 }
2340 2393
2341 /* initialize streams */ 2394 /* initialize streams */
2342 azx_init_stream(chip); 2395 azx_init_stream(chip);
@@ -2359,9 +2412,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2359 } 2412 }
2360 2413
2361 strcpy(card->driver, "HDA-Intel"); 2414 strcpy(card->driver, "HDA-Intel");
2362 strcpy(card->shortname, driver_short_names[chip->driver_type]); 2415 strlcpy(card->shortname, driver_short_names[chip->driver_type],
2363 sprintf(card->longname, "%s at 0x%lx irq %i", 2416 sizeof(card->shortname));
2364 card->shortname, chip->addr, chip->irq); 2417 snprintf(card->longname, sizeof(card->longname),
2418 "%s at 0x%lx irq %i",
2419 card->shortname, chip->addr, chip->irq);
2365 2420
2366 *rchip = chip; 2421 *rchip = chip;
2367 return 0; 2422 return 0;
@@ -2514,6 +2569,20 @@ static struct pci_device_id azx_ids[] = {
2514 { PCI_DEVICE(0x10de, 0x0d97), .driver_data = AZX_DRIVER_NVIDIA }, 2569 { PCI_DEVICE(0x10de, 0x0d97), .driver_data = AZX_DRIVER_NVIDIA },
2515 /* Teradici */ 2570 /* Teradici */
2516 { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA }, 2571 { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA },
2572 /* Creative X-Fi (CA0110-IBG) */
2573#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
2574 /* the following entry conflicts with snd-ctxfi driver,
2575 * as ctxfi driver mutates from HD-audio to native mode with
2576 * a special command sequence.
2577 */
2578 { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
2579 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2580 .class_mask = 0xffffff,
2581 .driver_data = AZX_DRIVER_GENERIC },
2582#else
2583 /* this entry seems still valid -- i.e. without emu20kx chip */
2584 { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC },
2585#endif
2517 /* AMD Generic, PCI class code and Vendor ID for HD Audio */ 2586 /* AMD Generic, PCI class code and Vendor ID for HD Audio */
2518 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), 2587 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
2519 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2588 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 93d7499350c6..418c5d1badaa 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -466,8 +466,12 @@ static void print_codec_info(struct snd_info_entry *entry,
466 hda_nid_t nid; 466 hda_nid_t nid;
467 int i, nodes; 467 int i, nodes;
468 468
469 snd_iprintf(buffer, "Codec: %s\n", 469 snd_iprintf(buffer, "Codec: ");
470 codec->name ? codec->name : "Not Set"); 470 if (codec->vendor_name && codec->chip_name)
471 snd_iprintf(buffer, "%s %s\n",
472 codec->vendor_name, codec->chip_name);
473 else
474 snd_iprintf(buffer, "Not Set\n");
471 snd_iprintf(buffer, "Address: %d\n", codec->addr); 475 snd_iprintf(buffer, "Address: %d\n", codec->addr);
472 snd_iprintf(buffer, "Function Id: 0x%x\n", codec->function_id); 476 snd_iprintf(buffer, "Function Id: 0x%x\n", codec->function_id);
473 snd_iprintf(buffer, "Vendor Id: 0x%08x\n", codec->vendor_id); 477 snd_iprintf(buffer, "Vendor Id: 0x%08x\n", codec->vendor_id);
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
new file mode 100644
index 000000000000..392d108c3558
--- /dev/null
+++ b/sound/pci/hda/patch_ca0110.c
@@ -0,0 +1,573 @@
1/*
2 * HD audio interface patch for Creative X-Fi CA0110-IBG chip
3 *
4 * Copyright (c) 2008 Takashi Iwai <tiwai@suse.de>
5 *
6 * This driver is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This driver is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/pci.h>
25#include <sound/core.h>
26#include "hda_codec.h"
27#include "hda_local.h"
28
29/*
30 */
31
32struct ca0110_spec {
33 struct auto_pin_cfg autocfg;
34 struct hda_multi_out multiout;
35 hda_nid_t out_pins[AUTO_CFG_MAX_OUTS];
36 hda_nid_t dacs[AUTO_CFG_MAX_OUTS];
37 hda_nid_t hp_dac;
38 hda_nid_t input_pins[AUTO_PIN_LAST];
39 hda_nid_t adcs[AUTO_PIN_LAST];
40 hda_nid_t dig_out;
41 hda_nid_t dig_in;
42 unsigned int num_inputs;
43 const char *input_labels[AUTO_PIN_LAST];
44 struct hda_pcm pcm_rec[2]; /* PCM information */
45};
46
47/*
48 * PCM callbacks
49 */
50static int ca0110_playback_pcm_open(struct hda_pcm_stream *hinfo,
51 struct hda_codec *codec,
52 struct snd_pcm_substream *substream)
53{
54 struct ca0110_spec *spec = codec->spec;
55 return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
56 hinfo);
57}
58
59static int ca0110_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
60 struct hda_codec *codec,
61 unsigned int stream_tag,
62 unsigned int format,
63 struct snd_pcm_substream *substream)
64{
65 struct ca0110_spec *spec = codec->spec;
66 return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
67 stream_tag, format, substream);
68}
69
70static int ca0110_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
71 struct hda_codec *codec,
72 struct snd_pcm_substream *substream)
73{
74 struct ca0110_spec *spec = codec->spec;
75 return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
76}
77
78/*
79 * Digital out
80 */
81static int ca0110_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
82 struct hda_codec *codec,
83 struct snd_pcm_substream *substream)
84{
85 struct ca0110_spec *spec = codec->spec;
86 return snd_hda_multi_out_dig_open(codec, &spec->multiout);
87}
88
89static int ca0110_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
90 struct hda_codec *codec,
91 struct snd_pcm_substream *substream)
92{
93 struct ca0110_spec *spec = codec->spec;
94 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
95}
96
97static int ca0110_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
98 struct hda_codec *codec,
99 unsigned int stream_tag,
100 unsigned int format,
101 struct snd_pcm_substream *substream)
102{
103 struct ca0110_spec *spec = codec->spec;
104 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
105 format, substream);
106}
107
108/*
109 * Analog capture
110 */
111static int ca0110_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
112 struct hda_codec *codec,
113 unsigned int stream_tag,
114 unsigned int format,
115 struct snd_pcm_substream *substream)
116{
117 struct ca0110_spec *spec = codec->spec;
118
119 snd_hda_codec_setup_stream(codec, spec->adcs[substream->number],
120 stream_tag, 0, format);
121 return 0;
122}
123
124static int ca0110_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
125 struct hda_codec *codec,
126 struct snd_pcm_substream *substream)
127{
128 struct ca0110_spec *spec = codec->spec;
129
130 snd_hda_codec_cleanup_stream(codec, spec->adcs[substream->number]);
131 return 0;
132}
133
134/*
135 */
136
137static char *dirstr[2] = { "Playback", "Capture" };
138
139static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
140 int chan, int dir)
141{
142 char namestr[44];
143 int type = dir ? HDA_INPUT : HDA_OUTPUT;
144 struct snd_kcontrol_new knew =
145 HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
146 sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
147 return snd_hda_ctl_add(codec, snd_ctl_new1(&knew, codec));
148}
149
150static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
151 int chan, int dir)
152{
153 char namestr[44];
154 int type = dir ? HDA_INPUT : HDA_OUTPUT;
155 struct snd_kcontrol_new knew =
156 HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
157 sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
158 return snd_hda_ctl_add(codec, snd_ctl_new1(&knew, codec));
159}
160
161#define add_out_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 0)
162#define add_out_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 0)
163#define add_in_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 1)
164#define add_in_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 1)
165#define add_mono_switch(codec, nid, pfx, chan) \
166 _add_switch(codec, nid, pfx, chan, 0)
167#define add_mono_volume(codec, nid, pfx, chan) \
168 _add_volume(codec, nid, pfx, chan, 0)
169
170static int ca0110_build_controls(struct hda_codec *codec)
171{
172 struct ca0110_spec *spec = codec->spec;
173 struct auto_pin_cfg *cfg = &spec->autocfg;
174 static char *prefix[AUTO_CFG_MAX_OUTS] = {
175 "Front", "Surround", NULL, "Side", "Multi"
176 };
177 hda_nid_t mutenid;
178 int i, err;
179
180 for (i = 0; i < spec->multiout.num_dacs; i++) {
181 if (get_wcaps(codec, spec->out_pins[i]) & AC_WCAP_OUT_AMP)
182 mutenid = spec->out_pins[i];
183 else
184 mutenid = spec->multiout.dac_nids[i];
185 if (!prefix[i]) {
186 err = add_mono_switch(codec, mutenid,
187 "Center", 1);
188 if (err < 0)
189 return err;
190 err = add_mono_switch(codec, mutenid,
191 "LFE", 1);
192 if (err < 0)
193 return err;
194 err = add_mono_volume(codec, spec->multiout.dac_nids[i],
195 "Center", 1);
196 if (err < 0)
197 return err;
198 err = add_mono_volume(codec, spec->multiout.dac_nids[i],
199 "LFE", 1);
200 if (err < 0)
201 return err;
202 } else {
203 err = add_out_switch(codec, mutenid,
204 prefix[i]);
205 if (err < 0)
206 return err;
207 err = add_out_volume(codec, spec->multiout.dac_nids[i],
208 prefix[i]);
209 if (err < 0)
210 return err;
211 }
212 }
213 if (cfg->hp_outs) {
214 if (get_wcaps(codec, cfg->hp_pins[0]) & AC_WCAP_OUT_AMP)
215 mutenid = cfg->hp_pins[0];
216 else
217 mutenid = spec->multiout.dac_nids[i];
218
219 err = add_out_switch(codec, mutenid, "Headphone");
220 if (err < 0)
221 return err;
222 if (spec->hp_dac) {
223 err = add_out_volume(codec, spec->hp_dac, "Headphone");
224 if (err < 0)
225 return err;
226 }
227 }
228 for (i = 0; i < spec->num_inputs; i++) {
229 const char *label = spec->input_labels[i];
230 if (get_wcaps(codec, spec->input_pins[i]) & AC_WCAP_IN_AMP)
231 mutenid = spec->input_pins[i];
232 else
233 mutenid = spec->adcs[i];
234 err = add_in_switch(codec, mutenid, label);
235 if (err < 0)
236 return err;
237 err = add_in_volume(codec, spec->adcs[i], label);
238 if (err < 0)
239 return err;
240 }
241
242 if (spec->dig_out) {
243 err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out);
244 if (err < 0)
245 return err;
246 err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
247 if (err < 0)
248 return err;
249 spec->multiout.share_spdif = 1;
250 }
251 if (spec->dig_in) {
252 err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
253 if (err < 0)
254 return err;
255 err = add_in_volume(codec, spec->dig_in, "IEC958");
256 }
257 return 0;
258}
259
260/*
261 */
262static struct hda_pcm_stream ca0110_pcm_analog_playback = {
263 .substreams = 1,
264 .channels_min = 2,
265 .channels_max = 8,
266 .ops = {
267 .open = ca0110_playback_pcm_open,
268 .prepare = ca0110_playback_pcm_prepare,
269 .cleanup = ca0110_playback_pcm_cleanup
270 },
271};
272
273static struct hda_pcm_stream ca0110_pcm_analog_capture = {
274 .substreams = 1,
275 .channels_min = 2,
276 .channels_max = 2,
277 .ops = {
278 .prepare = ca0110_capture_pcm_prepare,
279 .cleanup = ca0110_capture_pcm_cleanup
280 },
281};
282
283static struct hda_pcm_stream ca0110_pcm_digital_playback = {
284 .substreams = 1,
285 .channels_min = 2,
286 .channels_max = 2,
287 .ops = {
288 .open = ca0110_dig_playback_pcm_open,
289 .close = ca0110_dig_playback_pcm_close,
290 .prepare = ca0110_dig_playback_pcm_prepare
291 },
292};
293
294static struct hda_pcm_stream ca0110_pcm_digital_capture = {
295 .substreams = 1,
296 .channels_min = 2,
297 .channels_max = 2,
298};
299
300static int ca0110_build_pcms(struct hda_codec *codec)
301{
302 struct ca0110_spec *spec = codec->spec;
303 struct hda_pcm *info = spec->pcm_rec;
304
305 codec->pcm_info = info;
306 codec->num_pcms = 0;
307
308 info->name = "CA0110 Analog";
309 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0110_pcm_analog_playback;
310 info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dacs[0];
311 info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
312 spec->multiout.max_channels;
313 info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0110_pcm_analog_capture;
314 info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_inputs;
315 info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
316 codec->num_pcms++;
317
318 if (!spec->dig_out && !spec->dig_in)
319 return 0;
320
321 info++;
322 info->name = "CA0110 Digital";
323 info->pcm_type = HDA_PCM_TYPE_SPDIF;
324 if (spec->dig_out) {
325 info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
326 ca0110_pcm_digital_playback;
327 info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out;
328 }
329 if (spec->dig_in) {
330 info->stream[SNDRV_PCM_STREAM_CAPTURE] =
331 ca0110_pcm_digital_capture;
332 info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
333 }
334 codec->num_pcms++;
335
336 return 0;
337}
338
339static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
340{
341 if (pin) {
342 snd_hda_codec_write(codec, pin, 0,
343 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
344 if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
345 snd_hda_codec_write(codec, pin, 0,
346 AC_VERB_SET_AMP_GAIN_MUTE,
347 AMP_OUT_UNMUTE);
348 }
349 if (dac)
350 snd_hda_codec_write(codec, dac, 0,
351 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);
352}
353
354static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)
355{
356 if (pin) {
357 snd_hda_codec_write(codec, pin, 0,
358 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80);
359 if (get_wcaps(codec, pin) & AC_WCAP_IN_AMP)
360 snd_hda_codec_write(codec, pin, 0,
361 AC_VERB_SET_AMP_GAIN_MUTE,
362 AMP_IN_UNMUTE(0));
363 }
364 if (adc)
365 snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE,
366 AMP_IN_UNMUTE(0));
367}
368
369static int ca0110_init(struct hda_codec *codec)
370{
371 struct ca0110_spec *spec = codec->spec;
372 struct auto_pin_cfg *cfg = &spec->autocfg;
373 int i;
374
375 for (i = 0; i < spec->multiout.num_dacs; i++)
376 init_output(codec, spec->out_pins[i],
377 spec->multiout.dac_nids[i]);
378 init_output(codec, cfg->hp_pins[0], spec->hp_dac);
379 init_output(codec, cfg->dig_out_pins[0], spec->dig_out);
380
381 for (i = 0; i < spec->num_inputs; i++)
382 init_input(codec, spec->input_pins[i], spec->adcs[i]);
383 init_input(codec, cfg->dig_in_pin, spec->dig_in);
384 return 0;
385}
386
387static void ca0110_free(struct hda_codec *codec)
388{
389 kfree(codec->spec);
390}
391
392static struct hda_codec_ops ca0110_patch_ops = {
393 .build_controls = ca0110_build_controls,
394 .build_pcms = ca0110_build_pcms,
395 .init = ca0110_init,
396 .free = ca0110_free,
397};
398
399
400static void parse_line_outs(struct hda_codec *codec)
401{
402 struct ca0110_spec *spec = codec->spec;
403 struct auto_pin_cfg *cfg = &spec->autocfg;
404 int i, n;
405 unsigned int def_conf;
406 hda_nid_t nid;
407
408 n = 0;
409 for (i = 0; i < cfg->line_outs; i++) {
410 nid = cfg->line_out_pins[i];
411 def_conf = snd_hda_codec_get_pincfg(codec, nid);
412 if (!def_conf)
413 continue; /* invalid pin */
414 if (snd_hda_get_connections(codec, nid, &spec->dacs[i], 1) != 1)
415 continue;
416 spec->out_pins[n++] = nid;
417 }
418 spec->multiout.dac_nids = spec->dacs;
419 spec->multiout.num_dacs = n;
420 spec->multiout.max_channels = n * 2;
421}
422
423static void parse_hp_out(struct hda_codec *codec)
424{
425 struct ca0110_spec *spec = codec->spec;
426 struct auto_pin_cfg *cfg = &spec->autocfg;
427 int i;
428 unsigned int def_conf;
429 hda_nid_t nid, dac;
430
431 if (!cfg->hp_outs)
432 return;
433 nid = cfg->hp_pins[0];
434 def_conf = snd_hda_codec_get_pincfg(codec, nid);
435 if (!def_conf) {
436 cfg->hp_outs = 0;
437 return;
438 }
439 if (snd_hda_get_connections(codec, nid, &dac, 1) != 1)
440 return;
441
442 for (i = 0; i < cfg->line_outs; i++)
443 if (dac == spec->dacs[i])
444 break;
445 if (i >= cfg->line_outs) {
446 spec->hp_dac = dac;
447 spec->multiout.hp_nid = dac;
448 }
449}
450
451static void parse_input(struct hda_codec *codec)
452{
453 struct ca0110_spec *spec = codec->spec;
454 struct auto_pin_cfg *cfg = &spec->autocfg;
455 hda_nid_t nid, pin;
456 int n, i, j;
457
458 n = 0;
459 nid = codec->start_nid;
460 for (i = 0; i < codec->num_nodes; i++, nid++) {
461 unsigned int wcaps = get_wcaps(codec, nid);
462 unsigned int type = (wcaps & AC_WCAP_TYPE) >>
463 AC_WCAP_TYPE_SHIFT;
464 if (type != AC_WID_AUD_IN)
465 continue;
466 if (snd_hda_get_connections(codec, nid, &pin, 1) != 1)
467 continue;
468 if (pin == cfg->dig_in_pin) {
469 spec->dig_in = nid;
470 continue;
471 }
472 for (j = 0; j < AUTO_PIN_LAST; j++)
473 if (cfg->input_pins[j] == pin)
474 break;
475 if (j >= AUTO_PIN_LAST)
476 continue;
477 spec->input_pins[n] = pin;
478 spec->input_labels[n] = auto_pin_cfg_labels[j];
479 spec->adcs[n] = nid;
480 n++;
481 }
482 spec->num_inputs = n;
483}
484
485static void parse_digital(struct hda_codec *codec)
486{
487 struct ca0110_spec *spec = codec->spec;
488 struct auto_pin_cfg *cfg = &spec->autocfg;
489
490 if (cfg->dig_outs &&
491 snd_hda_get_connections(codec, cfg->dig_out_pins[0],
492 &spec->dig_out, 1) == 1)
493 spec->multiout.dig_out_nid = cfg->dig_out_pins[0];
494}
495
496static int ca0110_parse_auto_config(struct hda_codec *codec)
497{
498 struct ca0110_spec *spec = codec->spec;
499 int err;
500
501 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
502 if (err < 0)
503 return err;
504
505 parse_line_outs(codec);
506 parse_hp_out(codec);
507 parse_digital(codec);
508 parse_input(codec);
509 return 0;
510}
511
512
513int patch_ca0110(struct hda_codec *codec)
514{
515 struct ca0110_spec *spec;
516 int err;
517
518 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
519 if (!spec)
520 return -ENOMEM;
521 codec->spec = spec;
522
523 codec->bus->needs_damn_long_delay = 1;
524
525 err = ca0110_parse_auto_config(codec);
526 if (err < 0)
527 goto error;
528
529 codec->patch_ops = ca0110_patch_ops;
530
531 return 0;
532
533 error:
534 kfree(codec->spec);
535 codec->spec = NULL;
536 return err;
537}
538
539
540/*
541 * patch entries
542 */
543static struct hda_codec_preset snd_hda_preset_ca0110[] = {
544 { .id = 0x1102000a, .name = "CA0110-IBG", .patch = patch_ca0110 },
545 { .id = 0x1102000b, .name = "CA0110-IBG", .patch = patch_ca0110 },
546 { .id = 0x1102000d, .name = "SB0880 X-Fi", .patch = patch_ca0110 },
547 {} /* terminator */
548};
549
550MODULE_ALIAS("snd-hda-codec-id:1102000a");
551MODULE_ALIAS("snd-hda-codec-id:1102000b");
552MODULE_ALIAS("snd-hda-codec-id:1102000d");
553
554MODULE_LICENSE("GPL");
555MODULE_DESCRIPTION("Creative CA0110-IBG HD-audio codec");
556
557static struct hda_codec_preset_list ca0110_list = {
558 .preset = snd_hda_preset_ca0110,
559 .owner = THIS_MODULE,
560};
561
562static int __init patch_ca0110_init(void)
563{
564 return snd_hda_add_codec_preset(&ca0110_list);
565}
566
567static void __exit patch_ca0110_exit(void)
568{
569 snd_hda_delete_codec_preset(&ca0110_list);
570}
571
572module_init(patch_ca0110_init)
573module_exit(patch_ca0110_exit)
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index d57d8132a06e..f5792e2eea82 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -35,9 +35,28 @@ struct nvhdmi_spec {
35 struct hda_pcm pcm_rec; 35 struct hda_pcm pcm_rec;
36}; 36};
37 37
38#define Nv_VERB_SET_Channel_Allocation 0xF79
39#define Nv_VERB_SET_Info_Frame_Checksum 0xF7A
40#define Nv_VERB_SET_Audio_Protection_On 0xF98
41#define Nv_VERB_SET_Audio_Protection_Off 0xF99
42
43#define Nv_Master_Convert_nid 0x04
44#define Nv_Master_Pin_nid 0x05
45
46static hda_nid_t nvhdmi_convert_nids[4] = {
47 /*front, rear, clfe, rear_surr */
48 0x6, 0x8, 0xa, 0xc,
49};
50
38static struct hda_verb nvhdmi_basic_init[] = { 51static struct hda_verb nvhdmi_basic_init[] = {
52 /* set audio protect on */
53 { 0x1, Nv_VERB_SET_Audio_Protection_On, 0x1},
39 /* enable digital output on pin widget */ 54 /* enable digital output on pin widget */
40 { 0x05, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT }, 55 { 0x5, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
56 { 0x7, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
57 { 0x9, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
58 { 0xb, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
59 { 0xd, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
41 {} /* terminator */ 60 {} /* terminator */
42}; 61};
43 62
@@ -66,48 +85,205 @@ static int nvhdmi_init(struct hda_codec *codec)
66 * Digital out 85 * Digital out
67 */ 86 */
68static int nvhdmi_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, 87static int nvhdmi_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
69 struct hda_codec *codec, 88 struct hda_codec *codec,
70 struct snd_pcm_substream *substream) 89 struct snd_pcm_substream *substream)
71{ 90{
72 struct nvhdmi_spec *spec = codec->spec; 91 struct nvhdmi_spec *spec = codec->spec;
73 return snd_hda_multi_out_dig_open(codec, &spec->multiout); 92 return snd_hda_multi_out_dig_open(codec, &spec->multiout);
74} 93}
75 94
76static int nvhdmi_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, 95static int nvhdmi_dig_playback_pcm_close_8ch(struct hda_pcm_stream *hinfo,
77 struct hda_codec *codec, 96 struct hda_codec *codec,
78 struct snd_pcm_substream *substream) 97 struct snd_pcm_substream *substream)
79{ 98{
80 struct nvhdmi_spec *spec = codec->spec; 99 struct nvhdmi_spec *spec = codec->spec;
100 int i;
101
102 snd_hda_codec_write(codec, Nv_Master_Convert_nid,
103 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
104 for (i = 0; i < 4; i++) {
105 /* set the stream id */
106 snd_hda_codec_write(codec, nvhdmi_convert_nids[i], 0,
107 AC_VERB_SET_CHANNEL_STREAMID, 0);
108 /* set the stream format */
109 snd_hda_codec_write(codec, nvhdmi_convert_nids[i], 0,
110 AC_VERB_SET_STREAM_FORMAT, 0);
111 }
112
81 return snd_hda_multi_out_dig_close(codec, &spec->multiout); 113 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
82} 114}
83 115
84static int nvhdmi_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, 116static int nvhdmi_dig_playback_pcm_close_2ch(struct hda_pcm_stream *hinfo,
85 struct hda_codec *codec, 117 struct hda_codec *codec,
86 unsigned int stream_tag, 118 struct snd_pcm_substream *substream)
87 unsigned int format, 119{
88 struct snd_pcm_substream *substream) 120 struct nvhdmi_spec *spec = codec->spec;
121 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
122}
123
124static int nvhdmi_dig_playback_pcm_prepare_8ch(struct hda_pcm_stream *hinfo,
125 struct hda_codec *codec,
126 unsigned int stream_tag,
127 unsigned int format,
128 struct snd_pcm_substream *substream)
129{
130 int chs;
131 unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id;
132 int i;
133
134 mutex_lock(&codec->spdif_mutex);
135
136 chs = substream->runtime->channels;
137 chan = chs ? (chs - 1) : 1;
138
139 switch (chs) {
140 default:
141 case 0:
142 case 2:
143 chanmask = 0x00;
144 break;
145 case 4:
146 chanmask = 0x08;
147 break;
148 case 6:
149 chanmask = 0x0b;
150 break;
151 case 8:
152 chanmask = 0x13;
153 break;
154 }
155 dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT;
156 dataDCC2 = 0x2;
157
158 /* set the Audio InforFrame Channel Allocation */
159 snd_hda_codec_write(codec, 0x1, 0,
160 Nv_VERB_SET_Channel_Allocation, chanmask);
161
162 /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */
163 if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE))
164 snd_hda_codec_write(codec,
165 Nv_Master_Convert_nid,
166 0,
167 AC_VERB_SET_DIGI_CONVERT_1,
168 codec->spdif_ctls & ~AC_DIG1_ENABLE & 0xff);
169
170 /* set the stream id */
171 snd_hda_codec_write(codec, Nv_Master_Convert_nid, 0,
172 AC_VERB_SET_CHANNEL_STREAMID, (stream_tag << 4) | 0x0);
173
174 /* set the stream format */
175 snd_hda_codec_write(codec, Nv_Master_Convert_nid, 0,
176 AC_VERB_SET_STREAM_FORMAT, format);
177
178 /* turn on again (if needed) */
179 /* enable and set the channel status audio/data flag */
180 if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) {
181 snd_hda_codec_write(codec,
182 Nv_Master_Convert_nid,
183 0,
184 AC_VERB_SET_DIGI_CONVERT_1,
185 codec->spdif_ctls & 0xff);
186 snd_hda_codec_write(codec,
187 Nv_Master_Convert_nid,
188 0,
189 AC_VERB_SET_DIGI_CONVERT_2, dataDCC2);
190 }
191
192 for (i = 0; i < 4; i++) {
193 if (chs == 2)
194 channel_id = 0;
195 else
196 channel_id = i * 2;
197
198 /* turn off SPDIF once;
199 *otherwise the IEC958 bits won't be updated
200 */
201 if (codec->spdif_status_reset &&
202 (codec->spdif_ctls & AC_DIG1_ENABLE))
203 snd_hda_codec_write(codec,
204 nvhdmi_convert_nids[i],
205 0,
206 AC_VERB_SET_DIGI_CONVERT_1,
207 codec->spdif_ctls & ~AC_DIG1_ENABLE & 0xff);
208 /* set the stream id */
209 snd_hda_codec_write(codec,
210 nvhdmi_convert_nids[i],
211 0,
212 AC_VERB_SET_CHANNEL_STREAMID,
213 (stream_tag << 4) | channel_id);
214 /* set the stream format */
215 snd_hda_codec_write(codec,
216 nvhdmi_convert_nids[i],
217 0,
218 AC_VERB_SET_STREAM_FORMAT,
219 format);
220 /* turn on again (if needed) */
221 /* enable and set the channel status audio/data flag */
222 if (codec->spdif_status_reset &&
223 (codec->spdif_ctls & AC_DIG1_ENABLE)) {
224 snd_hda_codec_write(codec,
225 nvhdmi_convert_nids[i],
226 0,
227 AC_VERB_SET_DIGI_CONVERT_1,
228 codec->spdif_ctls & 0xff);
229 snd_hda_codec_write(codec,
230 nvhdmi_convert_nids[i],
231 0,
232 AC_VERB_SET_DIGI_CONVERT_2, dataDCC2);
233 }
234 }
235
236 /* set the Audio Info Frame Checksum */
237 snd_hda_codec_write(codec, 0x1, 0,
238 Nv_VERB_SET_Info_Frame_Checksum,
239 (0x71 - chan - chanmask));
240
241 mutex_unlock(&codec->spdif_mutex);
242 return 0;
243}
244
245static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo,
246 struct hda_codec *codec,
247 unsigned int stream_tag,
248 unsigned int format,
249 struct snd_pcm_substream *substream)
89{ 250{
90 struct nvhdmi_spec *spec = codec->spec; 251 struct nvhdmi_spec *spec = codec->spec;
91 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag, 252 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
92 format, substream); 253 format, substream);
93} 254}
94 255
95static struct hda_pcm_stream nvhdmi_pcm_digital_playback = { 256static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch = {
257 .substreams = 1,
258 .channels_min = 2,
259 .channels_max = 8,
260 .nid = Nv_Master_Convert_nid,
261 .rates = SNDRV_PCM_RATE_48000,
262 .maxbps = 16,
263 .formats = SNDRV_PCM_FMTBIT_S16_LE,
264 .ops = {
265 .open = nvhdmi_dig_playback_pcm_open,
266 .close = nvhdmi_dig_playback_pcm_close_8ch,
267 .prepare = nvhdmi_dig_playback_pcm_prepare_8ch
268 },
269};
270
271static struct hda_pcm_stream nvhdmi_pcm_digital_playback_2ch = {
96 .substreams = 1, 272 .substreams = 1,
97 .channels_min = 2, 273 .channels_min = 2,
98 .channels_max = 2, 274 .channels_max = 2,
99 .nid = 0x4, /* NID to query formats and rates and setup streams */ 275 .nid = Nv_Master_Convert_nid,
100 .rates = SNDRV_PCM_RATE_48000, 276 .rates = SNDRV_PCM_RATE_48000,
101 .maxbps = 16, 277 .maxbps = 16,
102 .formats = SNDRV_PCM_FMTBIT_S16_LE, 278 .formats = SNDRV_PCM_FMTBIT_S16_LE,
103 .ops = { 279 .ops = {
104 .open = nvhdmi_dig_playback_pcm_open, 280 .open = nvhdmi_dig_playback_pcm_open,
105 .close = nvhdmi_dig_playback_pcm_close, 281 .close = nvhdmi_dig_playback_pcm_close_2ch,
106 .prepare = nvhdmi_dig_playback_pcm_prepare 282 .prepare = nvhdmi_dig_playback_pcm_prepare_2ch
107 }, 283 },
108}; 284};
109 285
110static int nvhdmi_build_pcms(struct hda_codec *codec) 286static int nvhdmi_build_pcms_8ch(struct hda_codec *codec)
111{ 287{
112 struct nvhdmi_spec *spec = codec->spec; 288 struct nvhdmi_spec *spec = codec->spec;
113 struct hda_pcm *info = &spec->pcm_rec; 289 struct hda_pcm *info = &spec->pcm_rec;
@@ -117,7 +293,24 @@ static int nvhdmi_build_pcms(struct hda_codec *codec)
117 293
118 info->name = "NVIDIA HDMI"; 294 info->name = "NVIDIA HDMI";
119 info->pcm_type = HDA_PCM_TYPE_HDMI; 295 info->pcm_type = HDA_PCM_TYPE_HDMI;
120 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = nvhdmi_pcm_digital_playback; 296 info->stream[SNDRV_PCM_STREAM_PLAYBACK]
297 = nvhdmi_pcm_digital_playback_8ch;
298
299 return 0;
300}
301
302static int nvhdmi_build_pcms_2ch(struct hda_codec *codec)
303{
304 struct nvhdmi_spec *spec = codec->spec;
305 struct hda_pcm *info = &spec->pcm_rec;
306
307 codec->num_pcms = 1;
308 codec->pcm_info = info;
309
310 info->name = "NVIDIA HDMI";
311 info->pcm_type = HDA_PCM_TYPE_HDMI;
312 info->stream[SNDRV_PCM_STREAM_PLAYBACK]
313 = nvhdmi_pcm_digital_playback_2ch;
121 314
122 return 0; 315 return 0;
123} 316}
@@ -127,14 +320,40 @@ static void nvhdmi_free(struct hda_codec *codec)
127 kfree(codec->spec); 320 kfree(codec->spec);
128} 321}
129 322
130static struct hda_codec_ops nvhdmi_patch_ops = { 323static struct hda_codec_ops nvhdmi_patch_ops_8ch = {
324 .build_controls = nvhdmi_build_controls,
325 .build_pcms = nvhdmi_build_pcms_8ch,
326 .init = nvhdmi_init,
327 .free = nvhdmi_free,
328};
329
330static struct hda_codec_ops nvhdmi_patch_ops_2ch = {
131 .build_controls = nvhdmi_build_controls, 331 .build_controls = nvhdmi_build_controls,
132 .build_pcms = nvhdmi_build_pcms, 332 .build_pcms = nvhdmi_build_pcms_2ch,
133 .init = nvhdmi_init, 333 .init = nvhdmi_init,
134 .free = nvhdmi_free, 334 .free = nvhdmi_free,
135}; 335};
136 336
137static int patch_nvhdmi(struct hda_codec *codec) 337static int patch_nvhdmi_8ch(struct hda_codec *codec)
338{
339 struct nvhdmi_spec *spec;
340
341 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
342 if (spec == NULL)
343 return -ENOMEM;
344
345 codec->spec = spec;
346
347 spec->multiout.num_dacs = 0; /* no analog */
348 spec->multiout.max_channels = 8;
349 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
350
351 codec->patch_ops = nvhdmi_patch_ops_8ch;
352
353 return 0;
354}
355
356static int patch_nvhdmi_2ch(struct hda_codec *codec)
138{ 357{
139 struct nvhdmi_spec *spec; 358 struct nvhdmi_spec *spec;
140 359
@@ -144,13 +363,11 @@ static int patch_nvhdmi(struct hda_codec *codec)
144 363
145 codec->spec = spec; 364 codec->spec = spec;
146 365
147 spec->multiout.num_dacs = 0; /* no analog */ 366 spec->multiout.num_dacs = 0; /* no analog */
148 spec->multiout.max_channels = 2; 367 spec->multiout.max_channels = 2;
149 spec->multiout.dig_out_nid = 0x4; /* NID for copying analog to digital, 368 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
150 * seems to be unused in pure-digital
151 * case. */
152 369
153 codec->patch_ops = nvhdmi_patch_ops; 370 codec->patch_ops = nvhdmi_patch_ops_2ch;
154 371
155 return 0; 372 return 0;
156} 373}
@@ -159,11 +376,11 @@ static int patch_nvhdmi(struct hda_codec *codec)
159 * patch entries 376 * patch entries
160 */ 377 */
161static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { 378static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
162 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi }, 379 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
163 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi }, 380 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
164 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi }, 381 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch },
165 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi }, 382 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
166 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi }, 383 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
167 {} /* terminator */ 384 {} /* terminator */
168}; 385};
169 386
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0fd258eba3a5..337d2a59c67e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -190,6 +190,7 @@ enum {
190 ALC663_ASUS_MODE6, 190 ALC663_ASUS_MODE6,
191 ALC272_DELL, 191 ALC272_DELL,
192 ALC272_DELL_ZM1, 192 ALC272_DELL_ZM1,
193 ALC272_SAMSUNG_NC10,
193 ALC662_AUTO, 194 ALC662_AUTO,
194 ALC662_MODEL_LAST, 195 ALC662_MODEL_LAST,
195}; 196};
@@ -205,6 +206,7 @@ enum {
205 ALC882_ASUS_A7M, 206 ALC882_ASUS_A7M,
206 ALC885_MACPRO, 207 ALC885_MACPRO,
207 ALC885_MBP3, 208 ALC885_MBP3,
209 ALC885_MB5,
208 ALC885_IMAC24, 210 ALC885_IMAC24,
209 ALC882_AUTO, 211 ALC882_AUTO,
210 ALC882_MODEL_LAST, 212 ALC882_MODEL_LAST,
@@ -218,9 +220,11 @@ enum {
218 ALC883_6ST_DIG, 220 ALC883_6ST_DIG,
219 ALC883_TARGA_DIG, 221 ALC883_TARGA_DIG,
220 ALC883_TARGA_2ch_DIG, 222 ALC883_TARGA_2ch_DIG,
223 ALC883_TARGA_8ch_DIG,
221 ALC883_ACER, 224 ALC883_ACER,
222 ALC883_ACER_ASPIRE, 225 ALC883_ACER_ASPIRE,
223 ALC888_ACER_ASPIRE_4930G, 226 ALC888_ACER_ASPIRE_4930G,
227 ALC888_ACER_ASPIRE_8930G,
224 ALC883_MEDION, 228 ALC883_MEDION,
225 ALC883_MEDION_MD2, 229 ALC883_MEDION_MD2,
226 ALC883_LAPTOP_EAPD, 230 ALC883_LAPTOP_EAPD,
@@ -238,7 +242,9 @@ enum {
238 ALC883_3ST_6ch_INTEL, 242 ALC883_3ST_6ch_INTEL,
239 ALC888_ASUS_M90V, 243 ALC888_ASUS_M90V,
240 ALC888_ASUS_EEE1601, 244 ALC888_ASUS_EEE1601,
245 ALC889A_MB31,
241 ALC1200_ASUS_P5Q, 246 ALC1200_ASUS_P5Q,
247 ALC883_SONY_VAIO_TT,
242 ALC883_AUTO, 248 ALC883_AUTO,
243 ALC883_MODEL_LAST, 249 ALC883_MODEL_LAST,
244}; 250};
@@ -253,6 +259,15 @@ enum {
253/* for GPIO Poll */ 259/* for GPIO Poll */
254#define GPIO_MASK 0x03 260#define GPIO_MASK 0x03
255 261
262/* extra amp-initialization sequence types */
263enum {
264 ALC_INIT_NONE,
265 ALC_INIT_DEFAULT,
266 ALC_INIT_GPIO1,
267 ALC_INIT_GPIO2,
268 ALC_INIT_GPIO3,
269};
270
256struct alc_spec { 271struct alc_spec {
257 /* codec parameterization */ 272 /* codec parameterization */
258 struct snd_kcontrol_new *mixers[5]; /* mixer arrays */ 273 struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
@@ -266,13 +281,13 @@ struct alc_spec {
266 */ 281 */
267 unsigned int num_init_verbs; 282 unsigned int num_init_verbs;
268 283
269 char *stream_name_analog; /* analog PCM stream */ 284 char stream_name_analog[16]; /* analog PCM stream */
270 struct hda_pcm_stream *stream_analog_playback; 285 struct hda_pcm_stream *stream_analog_playback;
271 struct hda_pcm_stream *stream_analog_capture; 286 struct hda_pcm_stream *stream_analog_capture;
272 struct hda_pcm_stream *stream_analog_alt_playback; 287 struct hda_pcm_stream *stream_analog_alt_playback;
273 struct hda_pcm_stream *stream_analog_alt_capture; 288 struct hda_pcm_stream *stream_analog_alt_capture;
274 289
275 char *stream_name_digital; /* digital PCM stream */ 290 char stream_name_digital[16]; /* digital PCM stream */
276 struct hda_pcm_stream *stream_digital_playback; 291 struct hda_pcm_stream *stream_digital_playback;
277 struct hda_pcm_stream *stream_digital_capture; 292 struct hda_pcm_stream *stream_digital_capture;
278 293
@@ -301,6 +316,8 @@ struct alc_spec {
301 const struct hda_channel_mode *channel_mode; 316 const struct hda_channel_mode *channel_mode;
302 int num_channel_mode; 317 int num_channel_mode;
303 int need_dac_fix; 318 int need_dac_fix;
319 int const_channel_count;
320 int ext_channel_count;
304 321
305 /* PCM information */ 322 /* PCM information */
306 struct hda_pcm pcm_rec[3]; /* used in alc_build_pcms() */ 323 struct hda_pcm pcm_rec[3]; /* used in alc_build_pcms() */
@@ -322,6 +339,7 @@ struct alc_spec {
322 339
323 /* other flags */ 340 /* other flags */
324 unsigned int no_analog :1; /* digital I/O only */ 341 unsigned int no_analog :1; /* digital I/O only */
342 int init_amp;
325 343
326 /* for virtual master */ 344 /* for virtual master */
327 hda_nid_t vmaster_nid; 345 hda_nid_t vmaster_nid;
@@ -355,6 +373,7 @@ struct alc_config_preset {
355 unsigned int num_channel_mode; 373 unsigned int num_channel_mode;
356 const struct hda_channel_mode *channel_mode; 374 const struct hda_channel_mode *channel_mode;
357 int need_dac_fix; 375 int need_dac_fix;
376 int const_channel_count;
358 unsigned int num_mux_defs; 377 unsigned int num_mux_defs;
359 const struct hda_input_mux *input_mux; 378 const struct hda_input_mux *input_mux;
360 void (*unsol_event)(struct hda_codec *, unsigned int); 379 void (*unsol_event)(struct hda_codec *, unsigned int);
@@ -449,7 +468,7 @@ static int alc_ch_mode_get(struct snd_kcontrol *kcontrol,
449 struct alc_spec *spec = codec->spec; 468 struct alc_spec *spec = codec->spec;
450 return snd_hda_ch_mode_get(codec, ucontrol, spec->channel_mode, 469 return snd_hda_ch_mode_get(codec, ucontrol, spec->channel_mode,
451 spec->num_channel_mode, 470 spec->num_channel_mode,
452 spec->multiout.max_channels); 471 spec->ext_channel_count);
453} 472}
454 473
455static int alc_ch_mode_put(struct snd_kcontrol *kcontrol, 474static int alc_ch_mode_put(struct snd_kcontrol *kcontrol,
@@ -459,9 +478,12 @@ static int alc_ch_mode_put(struct snd_kcontrol *kcontrol,
459 struct alc_spec *spec = codec->spec; 478 struct alc_spec *spec = codec->spec;
460 int err = snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode, 479 int err = snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode,
461 spec->num_channel_mode, 480 spec->num_channel_mode,
462 &spec->multiout.max_channels); 481 &spec->ext_channel_count);
463 if (err >= 0 && spec->need_dac_fix) 482 if (err >= 0 && !spec->const_channel_count) {
464 spec->multiout.num_dacs = spec->multiout.max_channels / 2; 483 spec->multiout.max_channels = spec->ext_channel_count;
484 if (spec->need_dac_fix)
485 spec->multiout.num_dacs = spec->multiout.max_channels / 2;
486 }
465 return err; 487 return err;
466} 488}
467 489
@@ -841,8 +863,13 @@ static void setup_preset(struct alc_spec *spec,
841 spec->channel_mode = preset->channel_mode; 863 spec->channel_mode = preset->channel_mode;
842 spec->num_channel_mode = preset->num_channel_mode; 864 spec->num_channel_mode = preset->num_channel_mode;
843 spec->need_dac_fix = preset->need_dac_fix; 865 spec->need_dac_fix = preset->need_dac_fix;
866 spec->const_channel_count = preset->const_channel_count;
844 867
845 spec->multiout.max_channels = spec->channel_mode[0].channels; 868 if (preset->const_channel_count)
869 spec->multiout.max_channels = preset->const_channel_count;
870 else
871 spec->multiout.max_channels = spec->channel_mode[0].channels;
872 spec->ext_channel_count = spec->channel_mode[0].channels;
846 873
847 spec->multiout.num_dacs = preset->num_dacs; 874 spec->multiout.num_dacs = preset->num_dacs;
848 spec->multiout.dac_nids = preset->dac_nids; 875 spec->multiout.dac_nids = preset->dac_nids;
@@ -921,20 +948,26 @@ static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid,
921 alc_fix_pll(codec); 948 alc_fix_pll(codec);
922} 949}
923 950
924static void alc_sku_automute(struct hda_codec *codec) 951static void alc_automute_pin(struct hda_codec *codec)
925{ 952{
926 struct alc_spec *spec = codec->spec; 953 struct alc_spec *spec = codec->spec;
927 unsigned int present; 954 unsigned int present;
928 unsigned int hp_nid = spec->autocfg.hp_pins[0]; 955 unsigned int nid = spec->autocfg.hp_pins[0];
929 unsigned int sp_nid = spec->autocfg.speaker_pins[0]; 956 int i;
930 957
931 /* need to execute and sync at first */ 958 /* need to execute and sync at first */
932 snd_hda_codec_read(codec, hp_nid, 0, AC_VERB_SET_PIN_SENSE, 0); 959 snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
933 present = snd_hda_codec_read(codec, hp_nid, 0, 960 present = snd_hda_codec_read(codec, nid, 0,
934 AC_VERB_GET_PIN_SENSE, 0); 961 AC_VERB_GET_PIN_SENSE, 0);
935 spec->jack_present = (present & 0x80000000) != 0; 962 spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0;
936 snd_hda_codec_write(codec, sp_nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 963 for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) {
937 spec->jack_present ? 0 : PIN_OUT); 964 nid = spec->autocfg.speaker_pins[i];
965 if (!nid)
966 break;
967 snd_hda_codec_write(codec, nid, 0,
968 AC_VERB_SET_PIN_WIDGET_CONTROL,
969 spec->jack_present ? 0 : PIN_OUT);
970 }
938} 971}
939 972
940#if 0 /* it's broken in some acses -- temporarily disabled */ 973#if 0 /* it's broken in some acses -- temporarily disabled */
@@ -969,16 +1002,19 @@ static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
969 res >>= 28; 1002 res >>= 28;
970 else 1003 else
971 res >>= 26; 1004 res >>= 26;
972 if (res == ALC880_HP_EVENT) 1005 switch (res) {
973 alc_sku_automute(codec); 1006 case ALC880_HP_EVENT:
974 1007 alc_automute_pin(codec);
975 if (res == ALC880_MIC_EVENT) 1008 break;
1009 case ALC880_MIC_EVENT:
976 alc_mic_automute(codec); 1010 alc_mic_automute(codec);
1011 break;
1012 }
977} 1013}
978 1014
979static void alc_inithook(struct hda_codec *codec) 1015static void alc_inithook(struct hda_codec *codec)
980{ 1016{
981 alc_sku_automute(codec); 1017 alc_automute_pin(codec);
982 alc_mic_automute(codec); 1018 alc_mic_automute(codec);
983} 1019}
984 1020
@@ -1000,69 +1036,21 @@ static void alc888_coef_init(struct hda_codec *codec)
1000 AC_VERB_SET_PROC_COEF, 0x3030); 1036 AC_VERB_SET_PROC_COEF, 0x3030);
1001} 1037}
1002 1038
1003/* 32-bit subsystem ID for BIOS loading in HD Audio codec. 1039static void alc_auto_init_amp(struct hda_codec *codec, int type)
1004 * 31 ~ 16 : Manufacture ID
1005 * 15 ~ 8 : SKU ID
1006 * 7 ~ 0 : Assembly ID
1007 * port-A --> pin 39/41, port-E --> pin 14/15, port-D --> pin 35/36
1008 */
1009static void alc_subsystem_id(struct hda_codec *codec,
1010 unsigned int porta, unsigned int porte,
1011 unsigned int portd)
1012{ 1040{
1013 unsigned int ass, tmp, i; 1041 unsigned int tmp;
1014 unsigned nid;
1015 struct alc_spec *spec = codec->spec;
1016
1017 ass = codec->subsystem_id & 0xffff;
1018 if ((ass != codec->bus->pci->subsystem_device) && (ass & 1))
1019 goto do_sku;
1020
1021 /*
1022 * 31~30 : port conetcivity
1023 * 29~21 : reserve
1024 * 20 : PCBEEP input
1025 * 19~16 : Check sum (15:1)
1026 * 15~1 : Custom
1027 * 0 : override
1028 */
1029 nid = 0x1d;
1030 if (codec->vendor_id == 0x10ec0260)
1031 nid = 0x17;
1032 ass = snd_hda_codec_get_pincfg(codec, nid);
1033 if (!(ass & 1) && !(ass & 0x100000))
1034 return;
1035 if ((ass >> 30) != 1) /* no physical connection */
1036 return;
1037 1042
1038 /* check sum */ 1043 switch (type) {
1039 tmp = 0; 1044 case ALC_INIT_GPIO1:
1040 for (i = 1; i < 16; i++) {
1041 if ((ass >> i) & 1)
1042 tmp++;
1043 }
1044 if (((ass >> 16) & 0xf) != tmp)
1045 return;
1046do_sku:
1047 /*
1048 * 0 : override
1049 * 1 : Swap Jack
1050 * 2 : 0 --> Desktop, 1 --> Laptop
1051 * 3~5 : External Amplifier control
1052 * 7~6 : Reserved
1053 */
1054 tmp = (ass & 0x38) >> 3; /* external Amp control */
1055 switch (tmp) {
1056 case 1:
1057 snd_hda_sequence_write(codec, alc_gpio1_init_verbs); 1045 snd_hda_sequence_write(codec, alc_gpio1_init_verbs);
1058 break; 1046 break;
1059 case 3: 1047 case ALC_INIT_GPIO2:
1060 snd_hda_sequence_write(codec, alc_gpio2_init_verbs); 1048 snd_hda_sequence_write(codec, alc_gpio2_init_verbs);
1061 break; 1049 break;
1062 case 7: 1050 case ALC_INIT_GPIO3:
1063 snd_hda_sequence_write(codec, alc_gpio3_init_verbs); 1051 snd_hda_sequence_write(codec, alc_gpio3_init_verbs);
1064 break; 1052 break;
1065 case 5: /* set EAPD output high */ 1053 case ALC_INIT_DEFAULT:
1066 switch (codec->vendor_id) { 1054 switch (codec->vendor_id) {
1067 case 0x10ec0260: 1055 case 0x10ec0260:
1068 snd_hda_codec_write(codec, 0x0f, 0, 1056 snd_hda_codec_write(codec, 0x0f, 0,
@@ -1116,7 +1104,7 @@ do_sku:
1116 tmp | 0x2010); 1104 tmp | 0x2010);
1117 break; 1105 break;
1118 case 0x10ec0888: 1106 case 0x10ec0888:
1119 /*alc888_coef_init(codec);*/ /* called in alc_init() */ 1107 alc888_coef_init(codec);
1120 break; 1108 break;
1121 case 0x10ec0267: 1109 case 0x10ec0267:
1122 case 0x10ec0268: 1110 case 0x10ec0268:
@@ -1131,7 +1119,107 @@ do_sku:
1131 tmp | 0x3000); 1119 tmp | 0x3000);
1132 break; 1120 break;
1133 } 1121 }
1134 default: 1122 break;
1123 }
1124}
1125
1126static void alc_init_auto_hp(struct hda_codec *codec)
1127{
1128 struct alc_spec *spec = codec->spec;
1129
1130 if (!spec->autocfg.hp_pins[0])
1131 return;
1132
1133 if (!spec->autocfg.speaker_pins[0]) {
1134 if (spec->autocfg.line_out_pins[0] &&
1135 spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
1136 spec->autocfg.speaker_pins[0] =
1137 spec->autocfg.line_out_pins[0];
1138 else
1139 return;
1140 }
1141
1142 snd_printdd("realtek: Enable HP auto-muting on NID 0x%x\n",
1143 spec->autocfg.hp_pins[0]);
1144 snd_hda_codec_write_cache(codec, spec->autocfg.hp_pins[0], 0,
1145 AC_VERB_SET_UNSOLICITED_ENABLE,
1146 AC_USRSP_EN | ALC880_HP_EVENT);
1147 spec->unsol_event = alc_sku_unsol_event;
1148}
1149
1150/* check subsystem ID and set up device-specific initialization;
1151 * return 1 if initialized, 0 if invalid SSID
1152 */
1153/* 32-bit subsystem ID for BIOS loading in HD Audio codec.
1154 * 31 ~ 16 : Manufacture ID
1155 * 15 ~ 8 : SKU ID
1156 * 7 ~ 0 : Assembly ID
1157 * port-A --> pin 39/41, port-E --> pin 14/15, port-D --> pin 35/36
1158 */
1159static int alc_subsystem_id(struct hda_codec *codec,
1160 hda_nid_t porta, hda_nid_t porte,
1161 hda_nid_t portd)
1162{
1163 unsigned int ass, tmp, i;
1164 unsigned nid;
1165 struct alc_spec *spec = codec->spec;
1166
1167 ass = codec->subsystem_id & 0xffff;
1168 if ((ass != codec->bus->pci->subsystem_device) && (ass & 1))
1169 goto do_sku;
1170
1171 /* invalid SSID, check the special NID pin defcfg instead */
1172 /*
1173 * 31~30 : port conetcivity
1174 * 29~21 : reserve
1175 * 20 : PCBEEP input
1176 * 19~16 : Check sum (15:1)
1177 * 15~1 : Custom
1178 * 0 : override
1179 */
1180 nid = 0x1d;
1181 if (codec->vendor_id == 0x10ec0260)
1182 nid = 0x17;
1183 ass = snd_hda_codec_get_pincfg(codec, nid);
1184 snd_printd("realtek: No valid SSID, "
1185 "checking pincfg 0x%08x for NID 0x%x\n",
1186 ass, nid);
1187 if (!(ass & 1) && !(ass & 0x100000))
1188 return 0;
1189 if ((ass >> 30) != 1) /* no physical connection */
1190 return 0;
1191
1192 /* check sum */
1193 tmp = 0;
1194 for (i = 1; i < 16; i++) {
1195 if ((ass >> i) & 1)
1196 tmp++;
1197 }
1198 if (((ass >> 16) & 0xf) != tmp)
1199 return 0;
1200do_sku:
1201 snd_printd("realtek: Enabling init ASM_ID=0x%04x CODEC_ID=%08x\n",
1202 ass & 0xffff, codec->vendor_id);
1203 /*
1204 * 0 : override
1205 * 1 : Swap Jack
1206 * 2 : 0 --> Desktop, 1 --> Laptop
1207 * 3~5 : External Amplifier control
1208 * 7~6 : Reserved
1209 */
1210 tmp = (ass & 0x38) >> 3; /* external Amp control */
1211 switch (tmp) {
1212 case 1:
1213 spec->init_amp = ALC_INIT_GPIO1;
1214 break;
1215 case 3:
1216 spec->init_amp = ALC_INIT_GPIO2;
1217 break;
1218 case 7:
1219 spec->init_amp = ALC_INIT_GPIO3;
1220 break;
1221 case 5:
1222 spec->init_amp = ALC_INIT_DEFAULT;
1135 break; 1223 break;
1136 } 1224 }
1137 1225
@@ -1139,7 +1227,7 @@ do_sku:
1139 * when the external headphone out jack is plugged" 1227 * when the external headphone out jack is plugged"
1140 */ 1228 */
1141 if (!(ass & 0x8000)) 1229 if (!(ass & 0x8000))
1142 return; 1230 return 1;
1143 /* 1231 /*
1144 * 10~8 : Jack location 1232 * 10~8 : Jack location
1145 * 12~11: Headphone out -> 00: PortA, 01: PortE, 02: PortD, 03: Resvered 1233 * 12~11: Headphone out -> 00: PortA, 01: PortE, 02: PortD, 03: Resvered
@@ -1147,14 +1235,6 @@ do_sku:
1147 * 15 : 1 --> enable the function "Mute internal speaker 1235 * 15 : 1 --> enable the function "Mute internal speaker
1148 * when the external headphone out jack is plugged" 1236 * when the external headphone out jack is plugged"
1149 */ 1237 */
1150 if (!spec->autocfg.speaker_pins[0]) {
1151 if (spec->autocfg.line_out_pins[0])
1152 spec->autocfg.speaker_pins[0] =
1153 spec->autocfg.line_out_pins[0];
1154 else
1155 return;
1156 }
1157
1158 if (!spec->autocfg.hp_pins[0]) { 1238 if (!spec->autocfg.hp_pins[0]) {
1159 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 1239 tmp = (ass >> 11) & 0x3; /* HP to chassis */
1160 if (tmp == 0) 1240 if (tmp == 0)
@@ -1164,23 +1244,23 @@ do_sku:
1164 else if (tmp == 2) 1244 else if (tmp == 2)
1165 spec->autocfg.hp_pins[0] = portd; 1245 spec->autocfg.hp_pins[0] = portd;
1166 else 1246 else
1167 return; 1247 return 1;
1168 } 1248 }
1169 if (spec->autocfg.hp_pins[0])
1170 snd_hda_codec_write(codec, spec->autocfg.hp_pins[0], 0,
1171 AC_VERB_SET_UNSOLICITED_ENABLE,
1172 AC_USRSP_EN | ALC880_HP_EVENT);
1173 1249
1174#if 0 /* it's broken in some acses -- temporarily disabled */ 1250 alc_init_auto_hp(codec);
1175 if (spec->autocfg.input_pins[AUTO_PIN_MIC] && 1251 return 1;
1176 spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC]) 1252}
1177 snd_hda_codec_write(codec,
1178 spec->autocfg.input_pins[AUTO_PIN_MIC], 0,
1179 AC_VERB_SET_UNSOLICITED_ENABLE,
1180 AC_USRSP_EN | ALC880_MIC_EVENT);
1181#endif /* disabled */
1182 1253
1183 spec->unsol_event = alc_sku_unsol_event; 1254static void alc_ssid_check(struct hda_codec *codec,
1255 hda_nid_t porta, hda_nid_t porte, hda_nid_t portd)
1256{
1257 if (!alc_subsystem_id(codec, porta, porte, portd)) {
1258 struct alc_spec *spec = codec->spec;
1259 snd_printd("realtek: "
1260 "Enable default setup for auto mode as fallback\n");
1261 spec->init_amp = ALC_INIT_DEFAULT;
1262 alc_init_auto_hp(codec);
1263 }
1184} 1264}
1185 1265
1186/* 1266/*
@@ -1315,32 +1395,58 @@ static struct hda_verb alc888_fujitsu_xa3530_verbs[] = {
1315 {} 1395 {}
1316}; 1396};
1317 1397
1318static void alc888_fujitsu_xa3530_automute(struct hda_codec *codec) 1398static void alc_automute_amp(struct hda_codec *codec)
1319{ 1399{
1320 unsigned int present; 1400 struct alc_spec *spec = codec->spec;
1321 unsigned int bits; 1401 unsigned int val, mute;
1322 /* Line out presence */ 1402 hda_nid_t nid;
1323 present = snd_hda_codec_read(codec, 0x17, 0, 1403 int i;
1324 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 1404
1325 /* HP out presence */ 1405 spec->jack_present = 0;
1326 present = present || snd_hda_codec_read(codec, 0x1b, 0, 1406 for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) {
1327 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 1407 nid = spec->autocfg.hp_pins[i];
1328 bits = present ? HDA_AMP_MUTE : 0; 1408 if (!nid)
1409 break;
1410 val = snd_hda_codec_read(codec, nid, 0,
1411 AC_VERB_GET_PIN_SENSE, 0);
1412 if (val & AC_PINSENSE_PRESENCE) {
1413 spec->jack_present = 1;
1414 break;
1415 }
1416 }
1417
1418 mute = spec->jack_present ? HDA_AMP_MUTE : 0;
1329 /* Toggle internal speakers muting */ 1419 /* Toggle internal speakers muting */
1330 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0, 1420 for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) {
1331 HDA_AMP_MUTE, bits); 1421 nid = spec->autocfg.speaker_pins[i];
1332 /* Toggle internal bass muting */ 1422 if (!nid)
1333 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0, 1423 break;
1334 HDA_AMP_MUTE, bits); 1424 snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0,
1425 HDA_AMP_MUTE, mute);
1426 }
1335} 1427}
1336 1428
1337static void alc888_fujitsu_xa3530_unsol_event(struct hda_codec *codec, 1429static void alc_automute_amp_unsol_event(struct hda_codec *codec,
1338 unsigned int res) 1430 unsigned int res)
1339{ 1431{
1340 if (res >> 26 == ALC880_HP_EVENT) 1432 if (codec->vendor_id == 0x10ec0880)
1341 alc888_fujitsu_xa3530_automute(codec); 1433 res >>= 28;
1434 else
1435 res >>= 26;
1436 if (res == ALC880_HP_EVENT)
1437 alc_automute_amp(codec);
1342} 1438}
1343 1439
1440static void alc888_fujitsu_xa3530_init_hook(struct hda_codec *codec)
1441{
1442 struct alc_spec *spec = codec->spec;
1443
1444 spec->autocfg.hp_pins[0] = 0x17; /* line-out */
1445 spec->autocfg.hp_pins[1] = 0x1b; /* hp */
1446 spec->autocfg.speaker_pins[0] = 0x14; /* speaker */
1447 spec->autocfg.speaker_pins[1] = 0x15; /* bass */
1448 alc_automute_amp(codec);
1449}
1344 1450
1345/* 1451/*
1346 * ALC888 Acer Aspire 4930G model 1452 * ALC888 Acer Aspire 4930G model
@@ -1364,6 +1470,59 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = {
1364 { } 1470 { }
1365}; 1471};
1366 1472
1473/*
1474 * ALC889 Acer Aspire 8930G model
1475 */
1476
1477static struct hda_verb alc889_acer_aspire_8930g_verbs[] = {
1478/* Front Mic: set to PIN_IN (empty by default) */
1479 {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
1480/* Unselect Front Mic by default in input mixer 3 */
1481 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0xb)},
1482/* Enable unsolicited event for HP jack */
1483 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
1484/* Connect Internal Front to Front */
1485 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1486 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1487 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
1488/* Connect Internal Rear to Rear */
1489 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1490 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1491 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x01},
1492/* Connect Internal CLFE to CLFE */
1493 {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1494 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1495 {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
1496/* Connect HP out to Front */
1497 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP},
1498 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1499 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
1500/* Enable all DACs */
1501/* DAC DISABLE/MUTE 1? */
1502/* setting bits 1-5 disables DAC nids 0x02-0x06 apparently. Init=0x38 */
1503 {0x20, AC_VERB_SET_COEF_INDEX, 0x03},
1504 {0x20, AC_VERB_SET_PROC_COEF, 0x0000},
1505/* DAC DISABLE/MUTE 2? */
1506/* some bit here disables the other DACs. Init=0x4900 */
1507 {0x20, AC_VERB_SET_COEF_INDEX, 0x08},
1508 {0x20, AC_VERB_SET_PROC_COEF, 0x0000},
1509/* Enable amplifiers */
1510 {0x14, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
1511 {0x15, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
1512/* DMIC fix
1513 * This laptop has a stereo digital microphone. The mics are only 1cm apart
1514 * which makes the stereo useless. However, either the mic or the ALC889
1515 * makes the signal become a difference/sum signal instead of standard
1516 * stereo, which is annoying. So instead we flip this bit which makes the
1517 * codec replicate the sum signal to both channels, turning it into a
1518 * normal mono mic.
1519 */
1520/* DMIC_CONTROL? Init value = 0x0001 */
1521 {0x20, AC_VERB_SET_COEF_INDEX, 0x0b},
1522 {0x20, AC_VERB_SET_PROC_COEF, 0x0003},
1523 { }
1524};
1525
1367static struct hda_input_mux alc888_2_capture_sources[2] = { 1526static struct hda_input_mux alc888_2_capture_sources[2] = {
1368 /* Front mic only available on one ADC */ 1527 /* Front mic only available on one ADC */
1369 { 1528 {
@@ -1385,6 +1544,38 @@ static struct hda_input_mux alc888_2_capture_sources[2] = {
1385 } 1544 }
1386}; 1545};
1387 1546
1547static struct hda_input_mux alc889_capture_sources[3] = {
1548 /* Digital mic only available on first "ADC" */
1549 {
1550 .num_items = 5,
1551 .items = {
1552 { "Mic", 0x0 },
1553 { "Line", 0x2 },
1554 { "CD", 0x4 },
1555 { "Front Mic", 0xb },
1556 { "Input Mix", 0xa },
1557 },
1558 },
1559 {
1560 .num_items = 4,
1561 .items = {
1562 { "Mic", 0x0 },
1563 { "Line", 0x2 },
1564 { "CD", 0x4 },
1565 { "Input Mix", 0xa },
1566 },
1567 },
1568 {
1569 .num_items = 4,
1570 .items = {
1571 { "Mic", 0x0 },
1572 { "Line", 0x2 },
1573 { "CD", 0x4 },
1574 { "Input Mix", 0xa },
1575 },
1576 }
1577};
1578
1388static struct snd_kcontrol_new alc888_base_mixer[] = { 1579static struct snd_kcontrol_new alc888_base_mixer[] = {
1389 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 1580 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
1390 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), 1581 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -1407,22 +1598,24 @@ static struct snd_kcontrol_new alc888_base_mixer[] = {
1407 { } /* end */ 1598 { } /* end */
1408}; 1599};
1409 1600
1410static void alc888_acer_aspire_4930g_automute(struct hda_codec *codec) 1601static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec)
1411{ 1602{
1412 unsigned int present; 1603 struct alc_spec *spec = codec->spec;
1413 unsigned int bits; 1604
1414 present = snd_hda_codec_read(codec, 0x15, 0, 1605 spec->autocfg.hp_pins[0] = 0x15;
1415 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 1606 spec->autocfg.speaker_pins[0] = 0x14;
1416 bits = present ? HDA_AMP_MUTE : 0; 1607 alc_automute_amp(codec);
1417 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
1418 HDA_AMP_MUTE, bits);
1419} 1608}
1420 1609
1421static void alc888_acer_aspire_4930g_unsol_event(struct hda_codec *codec, 1610static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
1422 unsigned int res)
1423{ 1611{
1424 if (res >> 26 == ALC880_HP_EVENT) 1612 struct alc_spec *spec = codec->spec;
1425 alc888_acer_aspire_4930g_automute(codec); 1613
1614 spec->autocfg.hp_pins[0] = 0x15;
1615 spec->autocfg.speaker_pins[0] = 0x14;
1616 spec->autocfg.speaker_pins[1] = 0x16;
1617 spec->autocfg.speaker_pins[2] = 0x1b;
1618 alc_automute_amp(codec);
1426} 1619}
1427 1620
1428/* 1621/*
@@ -2390,21 +2583,6 @@ static struct hda_verb alc880_beep_init_verbs[] = {
2390 { } 2583 { }
2391}; 2584};
2392 2585
2393/* toggle speaker-output according to the hp-jack state */
2394static void alc880_uniwill_hp_automute(struct hda_codec *codec)
2395{
2396 unsigned int present;
2397 unsigned char bits;
2398
2399 present = snd_hda_codec_read(codec, 0x14, 0,
2400 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
2401 bits = present ? HDA_AMP_MUTE : 0;
2402 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
2403 HDA_AMP_MUTE, bits);
2404 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
2405 HDA_AMP_MUTE, bits);
2406}
2407
2408/* auto-toggle front mic */ 2586/* auto-toggle front mic */
2409static void alc880_uniwill_mic_automute(struct hda_codec *codec) 2587static void alc880_uniwill_mic_automute(struct hda_codec *codec)
2410{ 2588{
@@ -2417,9 +2595,14 @@ static void alc880_uniwill_mic_automute(struct hda_codec *codec)
2417 snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits); 2595 snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
2418} 2596}
2419 2597
2420static void alc880_uniwill_automute(struct hda_codec *codec) 2598static void alc880_uniwill_init_hook(struct hda_codec *codec)
2421{ 2599{
2422 alc880_uniwill_hp_automute(codec); 2600 struct alc_spec *spec = codec->spec;
2601
2602 spec->autocfg.hp_pins[0] = 0x14;
2603 spec->autocfg.speaker_pins[0] = 0x15;
2604 spec->autocfg.speaker_pins[0] = 0x16;
2605 alc_automute_amp(codec);
2423 alc880_uniwill_mic_automute(codec); 2606 alc880_uniwill_mic_automute(codec);
2424} 2607}
2425 2608
@@ -2430,24 +2613,22 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec,
2430 * definition. 4bit tag is placed at 28 bit! 2613 * definition. 4bit tag is placed at 28 bit!
2431 */ 2614 */
2432 switch (res >> 28) { 2615 switch (res >> 28) {
2433 case ALC880_HP_EVENT:
2434 alc880_uniwill_hp_automute(codec);
2435 break;
2436 case ALC880_MIC_EVENT: 2616 case ALC880_MIC_EVENT:
2437 alc880_uniwill_mic_automute(codec); 2617 alc880_uniwill_mic_automute(codec);
2438 break; 2618 break;
2619 default:
2620 alc_automute_amp_unsol_event(codec, res);
2621 break;
2439 } 2622 }
2440} 2623}
2441 2624
2442static void alc880_uniwill_p53_hp_automute(struct hda_codec *codec) 2625static void alc880_uniwill_p53_init_hook(struct hda_codec *codec)
2443{ 2626{
2444 unsigned int present; 2627 struct alc_spec *spec = codec->spec;
2445 unsigned char bits;
2446 2628
2447 present = snd_hda_codec_read(codec, 0x14, 0, 2629 spec->autocfg.hp_pins[0] = 0x14;
2448 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 2630 spec->autocfg.speaker_pins[0] = 0x15;
2449 bits = present ? HDA_AMP_MUTE : 0; 2631 alc_automute_amp(codec);
2450 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0, HDA_AMP_MUTE, bits);
2451} 2632}
2452 2633
2453static void alc880_uniwill_p53_dcvol_automute(struct hda_codec *codec) 2634static void alc880_uniwill_p53_dcvol_automute(struct hda_codec *codec)
@@ -2469,10 +2650,10 @@ static void alc880_uniwill_p53_unsol_event(struct hda_codec *codec,
2469 /* Looks like the unsol event is incompatible with the standard 2650 /* Looks like the unsol event is incompatible with the standard
2470 * definition. 4bit tag is placed at 28 bit! 2651 * definition. 4bit tag is placed at 28 bit!
2471 */ 2652 */
2472 if ((res >> 28) == ALC880_HP_EVENT)
2473 alc880_uniwill_p53_hp_automute(codec);
2474 if ((res >> 28) == ALC880_DCVOL_EVENT) 2653 if ((res >> 28) == ALC880_DCVOL_EVENT)
2475 alc880_uniwill_p53_dcvol_automute(codec); 2654 alc880_uniwill_p53_dcvol_automute(codec);
2655 else
2656 alc_automute_amp_unsol_event(codec, res);
2476} 2657}
2477 2658
2478/* 2659/*
@@ -2542,6 +2723,7 @@ static struct hda_verb alc880_pin_asus_init_verbs[] = {
2542/* Enable GPIO mask and set output */ 2723/* Enable GPIO mask and set output */
2543#define alc880_gpio1_init_verbs alc_gpio1_init_verbs 2724#define alc880_gpio1_init_verbs alc_gpio1_init_verbs
2544#define alc880_gpio2_init_verbs alc_gpio2_init_verbs 2725#define alc880_gpio2_init_verbs alc_gpio2_init_verbs
2726#define alc880_gpio3_init_verbs alc_gpio3_init_verbs
2545 2727
2546/* Clevo m520g init */ 2728/* Clevo m520g init */
2547static struct hda_verb alc880_pin_clevo_init_verbs[] = { 2729static struct hda_verb alc880_pin_clevo_init_verbs[] = {
@@ -2704,30 +2886,18 @@ static struct hda_verb alc880_lg_init_verbs[] = {
2704 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 2886 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
2705 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, 2887 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2706 /* jack sense */ 2888 /* jack sense */
2707 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | 0x1}, 2889 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
2708 { } 2890 { }
2709}; 2891};
2710 2892
2711/* toggle speaker-output according to the hp-jack state */ 2893/* toggle speaker-output according to the hp-jack state */
2712static void alc880_lg_automute(struct hda_codec *codec) 2894static void alc880_lg_init_hook(struct hda_codec *codec)
2713{ 2895{
2714 unsigned int present; 2896 struct alc_spec *spec = codec->spec;
2715 unsigned char bits;
2716
2717 present = snd_hda_codec_read(codec, 0x1b, 0,
2718 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
2719 bits = present ? HDA_AMP_MUTE : 0;
2720 snd_hda_codec_amp_stereo(codec, 0x17, HDA_OUTPUT, 0,
2721 HDA_AMP_MUTE, bits);
2722}
2723 2897
2724static void alc880_lg_unsol_event(struct hda_codec *codec, unsigned int res) 2898 spec->autocfg.hp_pins[0] = 0x1b;
2725{ 2899 spec->autocfg.speaker_pins[0] = 0x17;
2726 /* Looks like the unsol event is incompatible with the standard 2900 alc_automute_amp(codec);
2727 * definition. 4bit tag is placed at 28 bit!
2728 */
2729 if ((res >> 28) == 0x01)
2730 alc880_lg_automute(codec);
2731} 2901}
2732 2902
2733/* 2903/*
@@ -2801,30 +2971,18 @@ static struct hda_verb alc880_lg_lw_init_verbs[] = {
2801 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, 2971 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
2802 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, 2972 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2803 /* jack sense */ 2973 /* jack sense */
2804 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | 0x1}, 2974 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
2805 { } 2975 { }
2806}; 2976};
2807 2977
2808/* toggle speaker-output according to the hp-jack state */ 2978/* toggle speaker-output according to the hp-jack state */
2809static void alc880_lg_lw_automute(struct hda_codec *codec) 2979static void alc880_lg_lw_init_hook(struct hda_codec *codec)
2810{ 2980{
2811 unsigned int present; 2981 struct alc_spec *spec = codec->spec;
2812 unsigned char bits;
2813 2982
2814 present = snd_hda_codec_read(codec, 0x1b, 0, 2983 spec->autocfg.hp_pins[0] = 0x1b;
2815 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 2984 spec->autocfg.speaker_pins[0] = 0x14;
2816 bits = present ? HDA_AMP_MUTE : 0; 2985 alc_automute_amp(codec);
2817 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
2818 HDA_AMP_MUTE, bits);
2819}
2820
2821static void alc880_lg_lw_unsol_event(struct hda_codec *codec, unsigned int res)
2822{
2823 /* Looks like the unsol event is incompatible with the standard
2824 * definition. 4bit tag is placed at 28 bit!
2825 */
2826 if ((res >> 28) == 0x01)
2827 alc880_lg_lw_automute(codec);
2828} 2986}
2829 2987
2830static struct snd_kcontrol_new alc880_medion_rim_mixer[] = { 2988static struct snd_kcontrol_new alc880_medion_rim_mixer[] = {
@@ -2871,16 +3029,10 @@ static struct hda_verb alc880_medion_rim_init_verbs[] = {
2871/* toggle speaker-output according to the hp-jack state */ 3029/* toggle speaker-output according to the hp-jack state */
2872static void alc880_medion_rim_automute(struct hda_codec *codec) 3030static void alc880_medion_rim_automute(struct hda_codec *codec)
2873{ 3031{
2874 unsigned int present; 3032 struct alc_spec *spec = codec->spec;
2875 unsigned char bits; 3033 alc_automute_amp(codec);
2876 3034 /* toggle EAPD */
2877 present = snd_hda_codec_read(codec, 0x14, 0, 3035 if (spec->jack_present)
2878 AC_VERB_GET_PIN_SENSE, 0)
2879 & AC_PINSENSE_PRESENCE;
2880 bits = present ? HDA_AMP_MUTE : 0;
2881 snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
2882 HDA_AMP_MUTE, bits);
2883 if (present)
2884 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0); 3036 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0);
2885 else 3037 else
2886 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 2); 3038 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 2);
@@ -2896,6 +3048,15 @@ static void alc880_medion_rim_unsol_event(struct hda_codec *codec,
2896 alc880_medion_rim_automute(codec); 3048 alc880_medion_rim_automute(codec);
2897} 3049}
2898 3050
3051static void alc880_medion_rim_init_hook(struct hda_codec *codec)
3052{
3053 struct alc_spec *spec = codec->spec;
3054
3055 spec->autocfg.hp_pins[0] = 0x14;
3056 spec->autocfg.speaker_pins[0] = 0x1b;
3057 alc880_medion_rim_automute(codec);
3058}
3059
2899#ifdef CONFIG_SND_HDA_POWER_SAVE 3060#ifdef CONFIG_SND_HDA_POWER_SAVE
2900static struct hda_amp_list alc880_loopbacks[] = { 3061static struct hda_amp_list alc880_loopbacks[] = {
2901 { 0x0b, HDA_INPUT, 0 }, 3062 { 0x0b, HDA_INPUT, 0 },
@@ -2924,8 +3085,7 @@ static int alc_init(struct hda_codec *codec)
2924 unsigned int i; 3085 unsigned int i;
2925 3086
2926 alc_fix_pll(codec); 3087 alc_fix_pll(codec);
2927 if (codec->vendor_id == 0x10ec0888) 3088 alc_auto_init_amp(codec, spec->init_amp);
2928 alc888_coef_init(codec);
2929 3089
2930 for (i = 0; i < spec->num_init_verbs; i++) 3090 for (i = 0; i < spec->num_init_verbs; i++)
2931 snd_hda_sequence_write(codec, spec->init_verbs[i]); 3091 snd_hda_sequence_write(codec, spec->init_verbs[i]);
@@ -3127,7 +3287,10 @@ static int alc_build_pcms(struct hda_codec *codec)
3127 if (spec->no_analog) 3287 if (spec->no_analog)
3128 goto skip_analog; 3288 goto skip_analog;
3129 3289
3290 snprintf(spec->stream_name_analog, sizeof(spec->stream_name_analog),
3291 "%s Analog", codec->chip_name);
3130 info->name = spec->stream_name_analog; 3292 info->name = spec->stream_name_analog;
3293
3131 if (spec->stream_analog_playback) { 3294 if (spec->stream_analog_playback) {
3132 if (snd_BUG_ON(!spec->multiout.dac_nids)) 3295 if (snd_BUG_ON(!spec->multiout.dac_nids))
3133 return -EINVAL; 3296 return -EINVAL;
@@ -3153,6 +3316,9 @@ static int alc_build_pcms(struct hda_codec *codec)
3153 skip_analog: 3316 skip_analog:
3154 /* SPDIF for stream index #1 */ 3317 /* SPDIF for stream index #1 */
3155 if (spec->multiout.dig_out_nid || spec->dig_in_nid) { 3318 if (spec->multiout.dig_out_nid || spec->dig_in_nid) {
3319 snprintf(spec->stream_name_digital,
3320 sizeof(spec->stream_name_digital),
3321 "%s Digital", codec->chip_name);
3156 codec->num_pcms = 2; 3322 codec->num_pcms = 2;
3157 codec->slave_dig_outs = spec->multiout.slave_dig_outs; 3323 codec->slave_dig_outs = spec->multiout.slave_dig_outs;
3158 info = spec->pcm_rec + 1; 3324 info = spec->pcm_rec + 1;
@@ -3755,7 +3921,7 @@ static struct alc_config_preset alc880_presets[] = {
3755 .channel_mode = alc880_2_jack_modes, 3921 .channel_mode = alc880_2_jack_modes,
3756 .input_mux = &alc880_f1734_capture_source, 3922 .input_mux = &alc880_f1734_capture_source,
3757 .unsol_event = alc880_uniwill_p53_unsol_event, 3923 .unsol_event = alc880_uniwill_p53_unsol_event,
3758 .init_hook = alc880_uniwill_p53_hp_automute, 3924 .init_hook = alc880_uniwill_p53_init_hook,
3759 }, 3925 },
3760 [ALC880_ASUS] = { 3926 [ALC880_ASUS] = {
3761 .mixers = { alc880_asus_mixer }, 3927 .mixers = { alc880_asus_mixer },
@@ -3832,7 +3998,7 @@ static struct alc_config_preset alc880_presets[] = {
3832 .need_dac_fix = 1, 3998 .need_dac_fix = 1,
3833 .input_mux = &alc880_capture_source, 3999 .input_mux = &alc880_capture_source,
3834 .unsol_event = alc880_uniwill_unsol_event, 4000 .unsol_event = alc880_uniwill_unsol_event,
3835 .init_hook = alc880_uniwill_automute, 4001 .init_hook = alc880_uniwill_init_hook,
3836 }, 4002 },
3837 [ALC880_UNIWILL_P53] = { 4003 [ALC880_UNIWILL_P53] = {
3838 .mixers = { alc880_uniwill_p53_mixer }, 4004 .mixers = { alc880_uniwill_p53_mixer },
@@ -3844,7 +4010,7 @@ static struct alc_config_preset alc880_presets[] = {
3844 .channel_mode = alc880_threestack_modes, 4010 .channel_mode = alc880_threestack_modes,
3845 .input_mux = &alc880_capture_source, 4011 .input_mux = &alc880_capture_source,
3846 .unsol_event = alc880_uniwill_p53_unsol_event, 4012 .unsol_event = alc880_uniwill_p53_unsol_event,
3847 .init_hook = alc880_uniwill_p53_hp_automute, 4013 .init_hook = alc880_uniwill_p53_init_hook,
3848 }, 4014 },
3849 [ALC880_FUJITSU] = { 4015 [ALC880_FUJITSU] = {
3850 .mixers = { alc880_fujitsu_mixer }, 4016 .mixers = { alc880_fujitsu_mixer },
@@ -3858,7 +4024,7 @@ static struct alc_config_preset alc880_presets[] = {
3858 .channel_mode = alc880_2_jack_modes, 4024 .channel_mode = alc880_2_jack_modes,
3859 .input_mux = &alc880_capture_source, 4025 .input_mux = &alc880_capture_source,
3860 .unsol_event = alc880_uniwill_p53_unsol_event, 4026 .unsol_event = alc880_uniwill_p53_unsol_event,
3861 .init_hook = alc880_uniwill_p53_hp_automute, 4027 .init_hook = alc880_uniwill_p53_init_hook,
3862 }, 4028 },
3863 [ALC880_CLEVO] = { 4029 [ALC880_CLEVO] = {
3864 .mixers = { alc880_three_stack_mixer }, 4030 .mixers = { alc880_three_stack_mixer },
@@ -3883,8 +4049,8 @@ static struct alc_config_preset alc880_presets[] = {
3883 .channel_mode = alc880_lg_ch_modes, 4049 .channel_mode = alc880_lg_ch_modes,
3884 .need_dac_fix = 1, 4050 .need_dac_fix = 1,
3885 .input_mux = &alc880_lg_capture_source, 4051 .input_mux = &alc880_lg_capture_source,
3886 .unsol_event = alc880_lg_unsol_event, 4052 .unsol_event = alc_automute_amp_unsol_event,
3887 .init_hook = alc880_lg_automute, 4053 .init_hook = alc880_lg_init_hook,
3888#ifdef CONFIG_SND_HDA_POWER_SAVE 4054#ifdef CONFIG_SND_HDA_POWER_SAVE
3889 .loopbacks = alc880_lg_loopbacks, 4055 .loopbacks = alc880_lg_loopbacks,
3890#endif 4056#endif
@@ -3899,8 +4065,8 @@ static struct alc_config_preset alc880_presets[] = {
3899 .num_channel_mode = ARRAY_SIZE(alc880_lg_lw_modes), 4065 .num_channel_mode = ARRAY_SIZE(alc880_lg_lw_modes),
3900 .channel_mode = alc880_lg_lw_modes, 4066 .channel_mode = alc880_lg_lw_modes,
3901 .input_mux = &alc880_lg_lw_capture_source, 4067 .input_mux = &alc880_lg_lw_capture_source,
3902 .unsol_event = alc880_lg_lw_unsol_event, 4068 .unsol_event = alc_automute_amp_unsol_event,
3903 .init_hook = alc880_lg_lw_automute, 4069 .init_hook = alc880_lg_lw_init_hook,
3904 }, 4070 },
3905 [ALC880_MEDION_RIM] = { 4071 [ALC880_MEDION_RIM] = {
3906 .mixers = { alc880_medion_rim_mixer }, 4072 .mixers = { alc880_medion_rim_mixer },
@@ -3914,7 +4080,7 @@ static struct alc_config_preset alc880_presets[] = {
3914 .channel_mode = alc880_2_jack_modes, 4080 .channel_mode = alc880_2_jack_modes,
3915 .input_mux = &alc880_medion_rim_capture_source, 4081 .input_mux = &alc880_medion_rim_capture_source,
3916 .unsol_event = alc880_medion_rim_unsol_event, 4082 .unsol_event = alc880_medion_rim_unsol_event,
3917 .init_hook = alc880_medion_rim_automute, 4083 .init_hook = alc880_medion_rim_init_hook,
3918 }, 4084 },
3919#ifdef CONFIG_SND_DEBUG 4085#ifdef CONFIG_SND_DEBUG
3920 [ALC880_TEST] = { 4086 [ALC880_TEST] = {
@@ -4199,7 +4365,6 @@ static void alc880_auto_init_multi_out(struct hda_codec *codec)
4199 struct alc_spec *spec = codec->spec; 4365 struct alc_spec *spec = codec->spec;
4200 int i; 4366 int i;
4201 4367
4202 alc_subsystem_id(codec, 0x15, 0x1b, 0x14);
4203 for (i = 0; i < spec->autocfg.line_outs; i++) { 4368 for (i = 0; i < spec->autocfg.line_outs; i++) {
4204 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 4369 hda_nid_t nid = spec->autocfg.line_out_pins[i];
4205 int pin_type = get_pin_type(spec->autocfg.line_out_type); 4370 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -4304,6 +4469,8 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
4304 spec->num_mux_defs = 1; 4469 spec->num_mux_defs = 1;
4305 spec->input_mux = &spec->private_imux[0]; 4470 spec->input_mux = &spec->private_imux[0];
4306 4471
4472 alc_ssid_check(codec, 0x15, 0x1b, 0x14);
4473
4307 return 1; 4474 return 1;
4308} 4475}
4309 4476
@@ -4361,8 +4528,8 @@ static int patch_alc880(struct hda_codec *codec)
4361 alc880_models, 4528 alc880_models,
4362 alc880_cfg_tbl); 4529 alc880_cfg_tbl);
4363 if (board_config < 0) { 4530 if (board_config < 0) {
4364 printk(KERN_INFO "hda_codec: Unknown model for ALC880, " 4531 printk(KERN_INFO "hda_codec: Unknown model for %s, "
4365 "trying auto-probe from BIOS...\n"); 4532 "trying auto-probe from BIOS...\n", codec->chip_name);
4366 board_config = ALC880_AUTO; 4533 board_config = ALC880_AUTO;
4367 } 4534 }
4368 4535
@@ -4389,12 +4556,10 @@ static int patch_alc880(struct hda_codec *codec)
4389 if (board_config != ALC880_AUTO) 4556 if (board_config != ALC880_AUTO)
4390 setup_preset(spec, &alc880_presets[board_config]); 4557 setup_preset(spec, &alc880_presets[board_config]);
4391 4558
4392 spec->stream_name_analog = "ALC880 Analog";
4393 spec->stream_analog_playback = &alc880_pcm_analog_playback; 4559 spec->stream_analog_playback = &alc880_pcm_analog_playback;
4394 spec->stream_analog_capture = &alc880_pcm_analog_capture; 4560 spec->stream_analog_capture = &alc880_pcm_analog_capture;
4395 spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture; 4561 spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
4396 4562
4397 spec->stream_name_digital = "ALC880 Digital";
4398 spec->stream_digital_playback = &alc880_pcm_digital_playback; 4563 spec->stream_digital_playback = &alc880_pcm_digital_playback;
4399 spec->stream_digital_capture = &alc880_pcm_digital_capture; 4564 spec->stream_digital_capture = &alc880_pcm_digital_capture;
4400 4565
@@ -5679,7 +5844,6 @@ static void alc260_auto_init_multi_out(struct hda_codec *codec)
5679 struct alc_spec *spec = codec->spec; 5844 struct alc_spec *spec = codec->spec;
5680 hda_nid_t nid; 5845 hda_nid_t nid;
5681 5846
5682 alc_subsystem_id(codec, 0x10, 0x15, 0x0f);
5683 nid = spec->autocfg.line_out_pins[0]; 5847 nid = spec->autocfg.line_out_pins[0];
5684 if (nid) { 5848 if (nid) {
5685 int pin_type = get_pin_type(spec->autocfg.line_out_type); 5849 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -5789,6 +5953,8 @@ static int alc260_parse_auto_config(struct hda_codec *codec)
5789 spec->num_mux_defs = 1; 5953 spec->num_mux_defs = 1;
5790 spec->input_mux = &spec->private_imux[0]; 5954 spec->input_mux = &spec->private_imux[0];
5791 5955
5956 alc_ssid_check(codec, 0x10, 0x15, 0x0f);
5957
5792 return 1; 5958 return 1;
5793} 5959}
5794 5960
@@ -6006,8 +6172,9 @@ static int patch_alc260(struct hda_codec *codec)
6006 alc260_models, 6172 alc260_models,
6007 alc260_cfg_tbl); 6173 alc260_cfg_tbl);
6008 if (board_config < 0) { 6174 if (board_config < 0) {
6009 snd_printd(KERN_INFO "hda_codec: Unknown model for ALC260, " 6175 snd_printd(KERN_INFO "hda_codec: Unknown model for %s, "
6010 "trying auto-probe from BIOS...\n"); 6176 "trying auto-probe from BIOS...\n",
6177 codec->chip_name);
6011 board_config = ALC260_AUTO; 6178 board_config = ALC260_AUTO;
6012 } 6179 }
6013 6180
@@ -6034,11 +6201,9 @@ static int patch_alc260(struct hda_codec *codec)
6034 if (board_config != ALC260_AUTO) 6201 if (board_config != ALC260_AUTO)
6035 setup_preset(spec, &alc260_presets[board_config]); 6202 setup_preset(spec, &alc260_presets[board_config]);
6036 6203
6037 spec->stream_name_analog = "ALC260 Analog";
6038 spec->stream_analog_playback = &alc260_pcm_analog_playback; 6204 spec->stream_analog_playback = &alc260_pcm_analog_playback;
6039 spec->stream_analog_capture = &alc260_pcm_analog_capture; 6205 spec->stream_analog_capture = &alc260_pcm_analog_capture;
6040 6206
6041 spec->stream_name_digital = "ALC260 Digital";
6042 spec->stream_digital_playback = &alc260_pcm_digital_playback; 6207 spec->stream_digital_playback = &alc260_pcm_digital_playback;
6043 spec->stream_digital_capture = &alc260_pcm_digital_capture; 6208 spec->stream_digital_capture = &alc260_pcm_digital_capture;
6044 6209
@@ -6115,6 +6280,16 @@ static struct hda_input_mux alc882_capture_source = {
6115 { "CD", 0x4 }, 6280 { "CD", 0x4 },
6116 }, 6281 },
6117}; 6282};
6283
6284static struct hda_input_mux mb5_capture_source = {
6285 .num_items = 3,
6286 .items = {
6287 { "Mic", 0x1 },
6288 { "Line", 0x2 },
6289 { "CD", 0x4 },
6290 },
6291};
6292
6118/* 6293/*
6119 * 2ch mode 6294 * 2ch mode
6120 */ 6295 */
@@ -6202,6 +6377,34 @@ static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
6202 { 6, alc885_mbp_ch6_init }, 6377 { 6, alc885_mbp_ch6_init },
6203}; 6378};
6204 6379
6380/*
6381 * 2ch
6382 * Speakers/Woofer/HP = Front
6383 * LineIn = Input
6384 */
6385static struct hda_verb alc885_mb5_ch2_init[] = {
6386 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
6387 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
6388 { } /* end */
6389};
6390
6391/*
6392 * 6ch mode
6393 * Speakers/HP = Front
6394 * Woofer = LFE
6395 * LineIn = Surround
6396 */
6397static struct hda_verb alc885_mb5_ch6_init[] = {
6398 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
6399 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6400 {0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
6401 { } /* end */
6402};
6403
6404static struct hda_channel_mode alc885_mb5_6ch_modes[2] = {
6405 { 2, alc885_mb5_ch2_init },
6406 { 6, alc885_mb5_ch6_init },
6407};
6205 6408
6206/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17 6409/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
6207 * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b 6410 * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
@@ -6244,6 +6447,25 @@ static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
6244 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT), 6447 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
6245 { } /* end */ 6448 { } /* end */
6246}; 6449};
6450
6451static struct snd_kcontrol_new alc885_mb5_mixer[] = {
6452 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
6453 HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
6454 HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
6455 HDA_BIND_MUTE ("Surround Playback Switch", 0x0d, 0x02, HDA_INPUT),
6456 HDA_CODEC_VOLUME("LFE Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
6457 HDA_BIND_MUTE ("LFE Playback Switch", 0x0e, 0x02, HDA_INPUT),
6458 HDA_CODEC_VOLUME("HP Playback Volume", 0x0f, 0x00, HDA_OUTPUT),
6459 HDA_BIND_MUTE ("HP Playback Switch", 0x0f, 0x02, HDA_INPUT),
6460 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
6461 HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
6462 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
6463 HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
6464 HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
6465 HDA_CODEC_VOLUME("Mic Boost", 0x19, 0x00, HDA_INPUT),
6466 { } /* end */
6467};
6468
6247static struct snd_kcontrol_new alc882_w2jc_mixer[] = { 6469static struct snd_kcontrol_new alc882_w2jc_mixer[] = {
6248 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 6470 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
6249 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), 6471 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -6471,6 +6693,55 @@ static struct hda_verb alc882_macpro_init_verbs[] = {
6471 { } 6693 { }
6472}; 6694};
6473 6695
6696/* Macbook 5,1 */
6697static struct hda_verb alc885_mb5_init_verbs[] = {
6698 /* DACs */
6699 {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6700 {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6701 {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6702 {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6703 /* Front mixer */
6704 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
6705 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6706 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
6707 /* Surround mixer */
6708 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
6709 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6710 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
6711 /* LFE mixer */
6712 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
6713 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6714 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
6715 /* HP mixer */
6716 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
6717 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6718 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
6719 /* Front Pin (0x0c) */
6720 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x01},
6721 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6722 {0x18, AC_VERB_SET_CONNECT_SEL, 0x00},
6723 /* LFE Pin (0x0e) */
6724 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x01},
6725 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6726 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x02},
6727 /* HP Pin (0x0f) */
6728 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
6729 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
6730 {0x14, AC_VERB_SET_CONNECT_SEL, 0x03},
6731 /* Front Mic pin: input vref at 80% */
6732 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
6733 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
6734 /* Line In pin */
6735 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
6736 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
6737
6738 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
6739 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
6740 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
6741 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
6742 { }
6743};
6744
6474/* Macbook Pro rev3 */ 6745/* Macbook Pro rev3 */
6475static struct hda_verb alc885_mbp3_init_verbs[] = { 6746static struct hda_verb alc885_mbp3_init_verbs[] = {
6476 /* Front mixer: unmute input/output amp left and right (volume = 0) */ 6747 /* Front mixer: unmute input/output amp left and right (volume = 0) */
@@ -6560,45 +6831,23 @@ static struct hda_verb alc885_imac24_init_verbs[] = {
6560}; 6831};
6561 6832
6562/* Toggle speaker-output according to the hp-jack state */ 6833/* Toggle speaker-output according to the hp-jack state */
6563static void alc885_imac24_automute(struct hda_codec *codec) 6834static void alc885_imac24_automute_init_hook(struct hda_codec *codec)
6564{ 6835{
6565 unsigned int present; 6836 struct alc_spec *spec = codec->spec;
6566
6567 present = snd_hda_codec_read(codec, 0x14, 0,
6568 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
6569 snd_hda_codec_amp_stereo(codec, 0x18, HDA_OUTPUT, 0,
6570 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
6571 snd_hda_codec_amp_stereo(codec, 0x1a, HDA_OUTPUT, 0,
6572 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
6573}
6574 6837
6575/* Processes unsolicited events. */ 6838 spec->autocfg.hp_pins[0] = 0x14;
6576static void alc885_imac24_unsol_event(struct hda_codec *codec, 6839 spec->autocfg.speaker_pins[0] = 0x18;
6577 unsigned int res) 6840 spec->autocfg.speaker_pins[1] = 0x1a;
6578{ 6841 alc_automute_amp(codec);
6579 /* Headphone insertion or removal. */
6580 if ((res >> 26) == ALC880_HP_EVENT)
6581 alc885_imac24_automute(codec);
6582} 6842}
6583 6843
6584static void alc885_mbp3_automute(struct hda_codec *codec) 6844static void alc885_mbp3_init_hook(struct hda_codec *codec)
6585{ 6845{
6586 unsigned int present; 6846 struct alc_spec *spec = codec->spec;
6587
6588 present = snd_hda_codec_read(codec, 0x15, 0,
6589 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
6590 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
6591 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
6592 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
6593 HDA_AMP_MUTE, present ? 0 : HDA_AMP_MUTE);
6594 6847
6595} 6848 spec->autocfg.hp_pins[0] = 0x15;
6596static void alc885_mbp3_unsol_event(struct hda_codec *codec, 6849 spec->autocfg.speaker_pins[0] = 0x14;
6597 unsigned int res) 6850 alc_automute_amp(codec);
6598{
6599 /* Headphone insertion or removal. */
6600 if ((res >> 26) == ALC880_HP_EVENT)
6601 alc885_mbp3_automute(codec);
6602} 6851}
6603 6852
6604 6853
@@ -6623,24 +6872,25 @@ static struct hda_verb alc882_targa_verbs[] = {
6623/* toggle speaker-output according to the hp-jack state */ 6872/* toggle speaker-output according to the hp-jack state */
6624static void alc882_targa_automute(struct hda_codec *codec) 6873static void alc882_targa_automute(struct hda_codec *codec)
6625{ 6874{
6626 unsigned int present; 6875 struct alc_spec *spec = codec->spec;
6627 6876 alc_automute_amp(codec);
6628 present = snd_hda_codec_read(codec, 0x14, 0,
6629 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
6630 snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
6631 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
6632 snd_hda_codec_write_cache(codec, 1, 0, AC_VERB_SET_GPIO_DATA, 6877 snd_hda_codec_write_cache(codec, 1, 0, AC_VERB_SET_GPIO_DATA,
6633 present ? 1 : 3); 6878 spec->jack_present ? 1 : 3);
6879}
6880
6881static void alc882_targa_init_hook(struct hda_codec *codec)
6882{
6883 struct alc_spec *spec = codec->spec;
6884
6885 spec->autocfg.hp_pins[0] = 0x14;
6886 spec->autocfg.speaker_pins[0] = 0x1b;
6887 alc882_targa_automute(codec);
6634} 6888}
6635 6889
6636static void alc882_targa_unsol_event(struct hda_codec *codec, unsigned int res) 6890static void alc882_targa_unsol_event(struct hda_codec *codec, unsigned int res)
6637{ 6891{
6638 /* Looks like the unsol event is incompatible with the standard 6892 if ((res >> 26) == ALC880_HP_EVENT)
6639 * definition. 4bit tag is placed at 26 bit!
6640 */
6641 if (((res >> 26) == ALC880_HP_EVENT)) {
6642 alc882_targa_automute(codec); 6893 alc882_targa_automute(codec);
6643 }
6644} 6894}
6645 6895
6646static struct hda_verb alc882_asus_a7j_verbs[] = { 6896static struct hda_verb alc882_asus_a7j_verbs[] = {
@@ -6722,7 +6972,7 @@ static void alc885_macpro_init_hook(struct hda_codec *codec)
6722static void alc885_imac24_init_hook(struct hda_codec *codec) 6972static void alc885_imac24_init_hook(struct hda_codec *codec)
6723{ 6973{
6724 alc885_macpro_init_hook(codec); 6974 alc885_macpro_init_hook(codec);
6725 alc885_imac24_automute(codec); 6975 alc885_imac24_automute_init_hook(codec);
6726} 6976}
6727 6977
6728/* 6978/*
@@ -6815,6 +7065,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = {
6815 [ALC882_ASUS_A7J] = "asus-a7j", 7065 [ALC882_ASUS_A7J] = "asus-a7j",
6816 [ALC882_ASUS_A7M] = "asus-a7m", 7066 [ALC882_ASUS_A7M] = "asus-a7m",
6817 [ALC885_MACPRO] = "macpro", 7067 [ALC885_MACPRO] = "macpro",
7068 [ALC885_MB5] = "mb5",
6818 [ALC885_MBP3] = "mbp3", 7069 [ALC885_MBP3] = "mbp3",
6819 [ALC885_IMAC24] = "imac24", 7070 [ALC885_IMAC24] = "imac24",
6820 [ALC882_AUTO] = "auto", 7071 [ALC882_AUTO] = "auto",
@@ -6892,8 +7143,20 @@ static struct alc_config_preset alc882_presets[] = {
6892 .input_mux = &alc882_capture_source, 7143 .input_mux = &alc882_capture_source,
6893 .dig_out_nid = ALC882_DIGOUT_NID, 7144 .dig_out_nid = ALC882_DIGOUT_NID,
6894 .dig_in_nid = ALC882_DIGIN_NID, 7145 .dig_in_nid = ALC882_DIGIN_NID,
6895 .unsol_event = alc885_mbp3_unsol_event, 7146 .unsol_event = alc_automute_amp_unsol_event,
6896 .init_hook = alc885_mbp3_automute, 7147 .init_hook = alc885_mbp3_init_hook,
7148 },
7149 [ALC885_MB5] = {
7150 .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
7151 .init_verbs = { alc885_mb5_init_verbs,
7152 alc880_gpio1_init_verbs },
7153 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7154 .dac_nids = alc882_dac_nids,
7155 .channel_mode = alc885_mb5_6ch_modes,
7156 .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
7157 .input_mux = &mb5_capture_source,
7158 .dig_out_nid = ALC882_DIGOUT_NID,
7159 .dig_in_nid = ALC882_DIGIN_NID,
6897 }, 7160 },
6898 [ALC885_MACPRO] = { 7161 [ALC885_MACPRO] = {
6899 .mixers = { alc882_macpro_mixer }, 7162 .mixers = { alc882_macpro_mixer },
@@ -6917,7 +7180,7 @@ static struct alc_config_preset alc882_presets[] = {
6917 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes), 7180 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
6918 .channel_mode = alc882_ch_modes, 7181 .channel_mode = alc882_ch_modes,
6919 .input_mux = &alc882_capture_source, 7182 .input_mux = &alc882_capture_source,
6920 .unsol_event = alc885_imac24_unsol_event, 7183 .unsol_event = alc_automute_amp_unsol_event,
6921 .init_hook = alc885_imac24_init_hook, 7184 .init_hook = alc885_imac24_init_hook,
6922 }, 7185 },
6923 [ALC882_TARGA] = { 7186 [ALC882_TARGA] = {
@@ -6934,7 +7197,7 @@ static struct alc_config_preset alc882_presets[] = {
6934 .need_dac_fix = 1, 7197 .need_dac_fix = 1,
6935 .input_mux = &alc882_capture_source, 7198 .input_mux = &alc882_capture_source,
6936 .unsol_event = alc882_targa_unsol_event, 7199 .unsol_event = alc882_targa_unsol_event,
6937 .init_hook = alc882_targa_automute, 7200 .init_hook = alc882_targa_init_hook,
6938 }, 7201 },
6939 [ALC882_ASUS_A7J] = { 7202 [ALC882_ASUS_A7J] = {
6940 .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer }, 7203 .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
@@ -7014,7 +7277,6 @@ static void alc882_auto_init_multi_out(struct hda_codec *codec)
7014 struct alc_spec *spec = codec->spec; 7277 struct alc_spec *spec = codec->spec;
7015 int i; 7278 int i;
7016 7279
7017 alc_subsystem_id(codec, 0x15, 0x1b, 0x14);
7018 for (i = 0; i <= HDA_SIDE; i++) { 7280 for (i = 0; i <= HDA_SIDE; i++) {
7019 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 7281 hda_nid_t nid = spec->autocfg.line_out_pins[i];
7020 int pin_type = get_pin_type(spec->autocfg.line_out_type); 7282 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -7197,10 +7459,17 @@ static int patch_alc882(struct hda_codec *codec)
7197 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */ 7459 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
7198 case 0x106b00a4: /* MacbookPro4,1 */ 7460 case 0x106b00a4: /* MacbookPro4,1 */
7199 case 0x106b2c00: /* Macbook Pro rev3 */ 7461 case 0x106b2c00: /* Macbook Pro rev3 */
7200 case 0x106b3600: /* Macbook 3.1 */ 7462 /* Macbook 3.1 (0x106b3600) is handled by patch_alc883() */
7201 case 0x106b3800: /* MacbookPro4,1 - latter revision */ 7463 case 0x106b3800: /* MacbookPro4,1 - latter revision */
7202 board_config = ALC885_MBP3; 7464 board_config = ALC885_MBP3;
7203 break; 7465 break;
7466 case 0x106b3f00: /* Macbook 5,1 */
7467 case 0x106b4000: /* Macbook Pro 5,1 - FIXME: HP jack sense
7468 * seems not working, so apparently
7469 * no perfect solution yet
7470 */
7471 board_config = ALC885_MB5;
7472 break;
7204 default: 7473 default:
7205 /* ALC889A is handled better as ALC888-compatible */ 7474 /* ALC889A is handled better as ALC888-compatible */
7206 if (codec->revision_id == 0x100101 || 7475 if (codec->revision_id == 0x100101 ||
@@ -7208,8 +7477,9 @@ static int patch_alc882(struct hda_codec *codec)
7208 alc_free(codec); 7477 alc_free(codec);
7209 return patch_alc883(codec); 7478 return patch_alc883(codec);
7210 } 7479 }
7211 printk(KERN_INFO "hda_codec: Unknown model for ALC882, " 7480 printk(KERN_INFO "hda_codec: Unknown model for %s, "
7212 "trying auto-probe from BIOS...\n"); 7481 "trying auto-probe from BIOS...\n",
7482 codec->chip_name);
7213 board_config = ALC882_AUTO; 7483 board_config = ALC882_AUTO;
7214 } 7484 }
7215 } 7485 }
@@ -7239,14 +7509,6 @@ static int patch_alc882(struct hda_codec *codec)
7239 if (board_config != ALC882_AUTO) 7509 if (board_config != ALC882_AUTO)
7240 setup_preset(spec, &alc882_presets[board_config]); 7510 setup_preset(spec, &alc882_presets[board_config]);
7241 7511
7242 if (codec->vendor_id == 0x10ec0885) {
7243 spec->stream_name_analog = "ALC885 Analog";
7244 spec->stream_name_digital = "ALC885 Digital";
7245 } else {
7246 spec->stream_name_analog = "ALC882 Analog";
7247 spec->stream_name_digital = "ALC882 Digital";
7248 }
7249
7250 spec->stream_analog_playback = &alc882_pcm_analog_playback; 7512 spec->stream_analog_playback = &alc882_pcm_analog_playback;
7251 spec->stream_analog_capture = &alc882_pcm_analog_capture; 7513 spec->stream_analog_capture = &alc882_pcm_analog_capture;
7252 /* FIXME: setup DAC5 */ 7514 /* FIXME: setup DAC5 */
@@ -7399,6 +7661,17 @@ static struct hda_input_mux alc883_asus_eee1601_capture_source = {
7399 }, 7661 },
7400}; 7662};
7401 7663
7664static struct hda_input_mux alc889A_mb31_capture_source = {
7665 .num_items = 2,
7666 .items = {
7667 { "Mic", 0x0 },
7668 /* Front Mic (0x01) unused */
7669 { "Line", 0x2 },
7670 /* Line 2 (0x03) unused */
7671 /* CD (0x04) unsused? */
7672 },
7673};
7674
7402/* 7675/*
7403 * 2ch mode 7676 * 2ch mode
7404 */ 7677 */
@@ -7448,6 +7721,73 @@ static struct hda_channel_mode alc883_3ST_6ch_modes[3] = {
7448 { 6, alc883_3ST_ch6_init }, 7721 { 6, alc883_3ST_ch6_init },
7449}; 7722};
7450 7723
7724
7725/*
7726 * 2ch mode
7727 */
7728static struct hda_verb alc883_4ST_ch2_init[] = {
7729 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7730 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7731 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7732 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7733 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
7734 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7735 { } /* end */
7736};
7737
7738/*
7739 * 4ch mode
7740 */
7741static struct hda_verb alc883_4ST_ch4_init[] = {
7742 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7743 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7744 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7745 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7746 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7747 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7748 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7749 { } /* end */
7750};
7751
7752/*
7753 * 6ch mode
7754 */
7755static struct hda_verb alc883_4ST_ch6_init[] = {
7756 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7757 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7758 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7759 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7760 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
7761 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7762 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7763 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7764 { } /* end */
7765};
7766
7767/*
7768 * 8ch mode
7769 */
7770static struct hda_verb alc883_4ST_ch8_init[] = {
7771 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7772 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7773 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
7774 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7775 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7776 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
7777 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7778 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7779 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7780 { } /* end */
7781};
7782
7783static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
7784 { 2, alc883_4ST_ch2_init },
7785 { 4, alc883_4ST_ch4_init },
7786 { 6, alc883_4ST_ch6_init },
7787 { 8, alc883_4ST_ch8_init },
7788};
7789
7790
7451/* 7791/*
7452 * 2ch mode 7792 * 2ch mode
7453 */ 7793 */
@@ -7517,6 +7857,49 @@ static struct hda_channel_mode alc883_sixstack_modes[2] = {
7517 { 8, alc883_sixstack_ch8_init }, 7857 { 8, alc883_sixstack_ch8_init },
7518}; 7858};
7519 7859
7860/* 2ch mode (Speaker:front, Subwoofer:CLFE, Line:input, Headphones:front) */
7861static struct hda_verb alc889A_mb31_ch2_init[] = {
7862 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP as front */
7863 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Subwoofer on */
7864 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Line as input */
7865 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, /* Line off */
7866 { } /* end */
7867};
7868
7869/* 4ch mode (Speaker:front, Subwoofer:CLFE, Line:CLFE, Headphones:front) */
7870static struct hda_verb alc889A_mb31_ch4_init[] = {
7871 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP as front */
7872 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Subwoofer on */
7873 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, /* Line as output */
7874 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Line on */
7875 { } /* end */
7876};
7877
7878/* 5ch mode (Speaker:front, Subwoofer:CLFE, Line:input, Headphones:rear) */
7879static struct hda_verb alc889A_mb31_ch5_init[] = {
7880 {0x15, AC_VERB_SET_CONNECT_SEL, 0x01}, /* HP as rear */
7881 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Subwoofer on */
7882 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Line as input */
7883 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, /* Line off */
7884 { } /* end */
7885};
7886
7887/* 6ch mode (Speaker:front, Subwoofer:off, Line:CLFE, Headphones:Rear) */
7888static struct hda_verb alc889A_mb31_ch6_init[] = {
7889 {0x15, AC_VERB_SET_CONNECT_SEL, 0x01}, /* HP as front */
7890 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, /* Subwoofer off */
7891 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, /* Line as output */
7892 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, /* Line on */
7893 { } /* end */
7894};
7895
7896static struct hda_channel_mode alc889A_mb31_6ch_modes[4] = {
7897 { 2, alc889A_mb31_ch2_init },
7898 { 4, alc889A_mb31_ch4_init },
7899 { 5, alc889A_mb31_ch5_init },
7900 { 6, alc889A_mb31_ch6_init },
7901};
7902
7520static struct hda_verb alc883_medion_eapd_verbs[] = { 7903static struct hda_verb alc883_medion_eapd_verbs[] = {
7521 /* eanable EAPD on medion laptop */ 7904 /* eanable EAPD on medion laptop */
7522 {0x20, AC_VERB_SET_COEF_INDEX, 0x07}, 7905 {0x20, AC_VERB_SET_COEF_INDEX, 0x07},
@@ -7782,8 +8165,6 @@ static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = {
7782 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0d, 2, 2, HDA_INPUT), 8165 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0d, 2, 2, HDA_INPUT),
7783 HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT), 8166 HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
7784 HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT), 8167 HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
7785 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
7786 HDA_CODEC_MUTE("iSpeaker Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
7787 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), 8168 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
7788 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), 8169 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
7789 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), 8170 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
@@ -7797,6 +8178,42 @@ static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = {
7797 { } /* end */ 8178 { } /* end */
7798}; 8179};
7799 8180
8181static struct snd_kcontrol_new alc889A_mb31_mixer[] = {
8182 /* Output mixers */
8183 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
8184 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
8185 HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
8186 HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 0x02, HDA_INPUT),
8187 HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x00,
8188 HDA_OUTPUT),
8189 HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 0x02, HDA_INPUT),
8190 HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x00, HDA_OUTPUT),
8191 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 0x02, HDA_INPUT),
8192 /* Output switches */
8193 HDA_CODEC_MUTE("Enable Speaker", 0x14, 0x00, HDA_OUTPUT),
8194 HDA_CODEC_MUTE("Enable Headphones", 0x15, 0x00, HDA_OUTPUT),
8195 HDA_CODEC_MUTE_MONO("Enable LFE", 0x16, 2, 0x00, HDA_OUTPUT),
8196 /* Boost mixers */
8197 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
8198 HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
8199 /* Input mixers */
8200 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
8201 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
8202 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
8203 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
8204 { } /* end */
8205};
8206
8207static struct snd_kcontrol_new alc883_vaiott_mixer[] = {
8208 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8209 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
8210 HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
8211 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
8212 HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
8213 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
8214 { } /* end */
8215};
8216
7800static struct hda_bind_ctls alc883_bind_cap_vol = { 8217static struct hda_bind_ctls alc883_bind_cap_vol = {
7801 .ops = &snd_hda_bind_vol, 8218 .ops = &snd_hda_bind_vol,
7802 .values = { 8219 .values = {
@@ -7932,16 +8349,14 @@ static struct hda_verb alc883_init_verbs[] = {
7932}; 8349};
7933 8350
7934/* toggle speaker-output according to the hp-jack state */ 8351/* toggle speaker-output according to the hp-jack state */
7935static void alc883_mitac_hp_automute(struct hda_codec *codec) 8352static void alc883_mitac_init_hook(struct hda_codec *codec)
7936{ 8353{
7937 unsigned int present; 8354 struct alc_spec *spec = codec->spec;
7938 8355
7939 present = snd_hda_codec_read(codec, 0x15, 0, 8356 spec->autocfg.hp_pins[0] = 0x15;
7940 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 8357 spec->autocfg.speaker_pins[0] = 0x14;
7941 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0, 8358 spec->autocfg.speaker_pins[1] = 0x17;
7942 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0); 8359 alc_automute_amp(codec);
7943 snd_hda_codec_amp_stereo(codec, 0x17, HDA_OUTPUT, 0,
7944 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
7945} 8360}
7946 8361
7947/* auto-toggle front mic */ 8362/* auto-toggle front mic */
@@ -7958,25 +8373,6 @@ static void alc883_mitac_mic_automute(struct hda_codec *codec)
7958} 8373}
7959*/ 8374*/
7960 8375
7961static void alc883_mitac_automute(struct hda_codec *codec)
7962{
7963 alc883_mitac_hp_automute(codec);
7964 /* alc883_mitac_mic_automute(codec); */
7965}
7966
7967static void alc883_mitac_unsol_event(struct hda_codec *codec,
7968 unsigned int res)
7969{
7970 switch (res >> 26) {
7971 case ALC880_HP_EVENT:
7972 alc883_mitac_hp_automute(codec);
7973 break;
7974 case ALC880_MIC_EVENT:
7975 /* alc883_mitac_mic_automute(codec); */
7976 break;
7977 }
7978}
7979
7980static struct hda_verb alc883_mitac_verbs[] = { 8376static struct hda_verb alc883_mitac_verbs[] = {
7981 /* HP */ 8377 /* HP */
7982 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, 8378 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -8028,14 +8424,24 @@ static struct hda_verb alc883_tagra_verbs[] = {
8028 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 8424 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8029 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, 8425 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8030 8426
8031 {0x18, AC_VERB_SET_CONNECT_SEL, 0x02}, /* mic/clfe */ 8427/* Connect Line-Out side jack (SPDIF) to Side */
8032 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x01}, /* line/surround */ 8428 {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8033 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP */ 8429 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8430 {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
8431/* Connect Mic jack to CLFE */
8432 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8433 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8434 {0x18, AC_VERB_SET_CONNECT_SEL, 0x02},
8435/* Connect Line-in jack to Surround */
8436 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8437 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8438 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
8439/* Connect HP out jack to Front */
8440 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8441 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8442 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
8034 8443
8035 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, 8444 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8036 {0x01, AC_VERB_SET_GPIO_MASK, 0x03},
8037 {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03},
8038 {0x01, AC_VERB_SET_GPIO_DATA, 0x03},
8039 8445
8040 { } /* end */ 8446 { } /* end */
8041}; 8447};
@@ -8094,29 +8500,26 @@ static struct hda_verb alc888_6st_dell_verbs[] = {
8094 { } 8500 { }
8095}; 8501};
8096 8502
8097static void alc888_3st_hp_front_automute(struct hda_codec *codec) 8503static struct hda_verb alc883_vaiott_verbs[] = {
8098{ 8504 /* HP */
8099 unsigned int present, bits; 8505 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
8506 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8100 8507
8101 present = snd_hda_codec_read(codec, 0x1b, 0, 8508 /* enable unsolicited event */
8102 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 8509 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8103 bits = present ? HDA_AMP_MUTE : 0; 8510
8104 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0, 8511 { } /* end */
8105 HDA_AMP_MUTE, bits); 8512};
8106 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8107 HDA_AMP_MUTE, bits);
8108 snd_hda_codec_amp_stereo(codec, 0x18, HDA_OUTPUT, 0,
8109 HDA_AMP_MUTE, bits);
8110}
8111 8513
8112static void alc888_3st_hp_unsol_event(struct hda_codec *codec, 8514static void alc888_3st_hp_init_hook(struct hda_codec *codec)
8113 unsigned int res)
8114{ 8515{
8115 switch (res >> 26) { 8516 struct alc_spec *spec = codec->spec;
8116 case ALC880_HP_EVENT: 8517
8117 alc888_3st_hp_front_automute(codec); 8518 spec->autocfg.hp_pins[0] = 0x1b;
8118 break; 8519 spec->autocfg.speaker_pins[0] = 0x14;
8119 } 8520 spec->autocfg.speaker_pins[1] = 0x16;
8521 spec->autocfg.speaker_pins[2] = 0x18;
8522 alc_automute_amp(codec);
8120} 8523}
8121 8524
8122static struct hda_verb alc888_3st_hp_verbs[] = { 8525static struct hda_verb alc888_3st_hp_verbs[] = {
@@ -8213,56 +8616,18 @@ static struct hda_verb alc883_medion_md2_verbs[] = {
8213}; 8616};
8214 8617
8215/* toggle speaker-output according to the hp-jack state */ 8618/* toggle speaker-output according to the hp-jack state */
8216static void alc883_medion_md2_automute(struct hda_codec *codec) 8619static void alc883_medion_md2_init_hook(struct hda_codec *codec)
8217{ 8620{
8218 unsigned int present; 8621 struct alc_spec *spec = codec->spec;
8219
8220 present = snd_hda_codec_read(codec, 0x14, 0,
8221 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
8222 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8223 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8224}
8225
8226static void alc883_medion_md2_unsol_event(struct hda_codec *codec,
8227 unsigned int res)
8228{
8229 if ((res >> 26) == ALC880_HP_EVENT)
8230 alc883_medion_md2_automute(codec);
8231}
8232
8233/* toggle speaker-output according to the hp-jack state */
8234static void alc883_tagra_automute(struct hda_codec *codec)
8235{
8236 unsigned int present;
8237 unsigned char bits;
8238
8239 present = snd_hda_codec_read(codec, 0x14, 0,
8240 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
8241 bits = present ? HDA_AMP_MUTE : 0;
8242 snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
8243 HDA_AMP_MUTE, bits);
8244 snd_hda_codec_write_cache(codec, 1, 0, AC_VERB_SET_GPIO_DATA,
8245 present ? 1 : 3);
8246}
8247 8622
8248static void alc883_tagra_unsol_event(struct hda_codec *codec, unsigned int res) 8623 spec->autocfg.hp_pins[0] = 0x14;
8249{ 8624 spec->autocfg.speaker_pins[0] = 0x15;
8250 if ((res >> 26) == ALC880_HP_EVENT) 8625 alc_automute_amp(codec);
8251 alc883_tagra_automute(codec);
8252} 8626}
8253 8627
8254/* toggle speaker-output according to the hp-jack state */ 8628/* toggle speaker-output according to the hp-jack state */
8255static void alc883_clevo_m720_hp_automute(struct hda_codec *codec) 8629#define alc883_tagra_init_hook alc882_targa_init_hook
8256{ 8630#define alc883_tagra_unsol_event alc882_targa_unsol_event
8257 unsigned int present;
8258 unsigned char bits;
8259
8260 present = snd_hda_codec_read(codec, 0x15, 0, AC_VERB_GET_PIN_SENSE, 0)
8261 & AC_PINSENSE_PRESENCE;
8262 bits = present ? HDA_AMP_MUTE : 0;
8263 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
8264 HDA_AMP_MUTE, bits);
8265}
8266 8631
8267static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) 8632static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
8268{ 8633{
@@ -8274,9 +8639,13 @@ static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
8274 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0); 8639 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8275} 8640}
8276 8641
8277static void alc883_clevo_m720_automute(struct hda_codec *codec) 8642static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
8278{ 8643{
8279 alc883_clevo_m720_hp_automute(codec); 8644 struct alc_spec *spec = codec->spec;
8645
8646 spec->autocfg.hp_pins[0] = 0x15;
8647 spec->autocfg.speaker_pins[0] = 0x14;
8648 alc_automute_amp(codec);
8280 alc883_clevo_m720_mic_automute(codec); 8649 alc883_clevo_m720_mic_automute(codec);
8281} 8650}
8282 8651
@@ -8284,52 +8653,32 @@ static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
8284 unsigned int res) 8653 unsigned int res)
8285{ 8654{
8286 switch (res >> 26) { 8655 switch (res >> 26) {
8287 case ALC880_HP_EVENT:
8288 alc883_clevo_m720_hp_automute(codec);
8289 break;
8290 case ALC880_MIC_EVENT: 8656 case ALC880_MIC_EVENT:
8291 alc883_clevo_m720_mic_automute(codec); 8657 alc883_clevo_m720_mic_automute(codec);
8292 break; 8658 break;
8659 default:
8660 alc_automute_amp_unsol_event(codec, res);
8661 break;
8293 } 8662 }
8294} 8663}
8295 8664
8296/* toggle speaker-output according to the hp-jack state */ 8665/* toggle speaker-output according to the hp-jack state */
8297static void alc883_2ch_fujitsu_pi2515_automute(struct hda_codec *codec) 8666static void alc883_2ch_fujitsu_pi2515_init_hook(struct hda_codec *codec)
8298{ 8667{
8299 unsigned int present; 8668 struct alc_spec *spec = codec->spec;
8300 unsigned char bits;
8301
8302 present = snd_hda_codec_read(codec, 0x14, 0, AC_VERB_GET_PIN_SENSE, 0)
8303 & AC_PINSENSE_PRESENCE;
8304 bits = present ? HDA_AMP_MUTE : 0;
8305 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8306 HDA_AMP_MUTE, bits);
8307}
8308 8669
8309static void alc883_2ch_fujitsu_pi2515_unsol_event(struct hda_codec *codec, 8670 spec->autocfg.hp_pins[0] = 0x14;
8310 unsigned int res) 8671 spec->autocfg.speaker_pins[0] = 0x15;
8311{ 8672 alc_automute_amp(codec);
8312 if ((res >> 26) == ALC880_HP_EVENT)
8313 alc883_2ch_fujitsu_pi2515_automute(codec);
8314} 8673}
8315 8674
8316static void alc883_haier_w66_automute(struct hda_codec *codec) 8675static void alc883_haier_w66_init_hook(struct hda_codec *codec)
8317{ 8676{
8318 unsigned int present; 8677 struct alc_spec *spec = codec->spec;
8319 unsigned char bits;
8320 8678
8321 present = snd_hda_codec_read(codec, 0x1b, 0, 8679 spec->autocfg.hp_pins[0] = 0x1b;
8322 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 8680 spec->autocfg.speaker_pins[0] = 0x14;
8323 bits = present ? 0x80 : 0; 8681 alc_automute_amp(codec);
8324 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
8325 0x80, bits);
8326}
8327
8328static void alc883_haier_w66_unsol_event(struct hda_codec *codec,
8329 unsigned int res)
8330{
8331 if ((res >> 26) == ALC880_HP_EVENT)
8332 alc883_haier_w66_automute(codec);
8333} 8682}
8334 8683
8335static void alc883_lenovo_101e_ispeaker_automute(struct hda_codec *codec) 8684static void alc883_lenovo_101e_ispeaker_automute(struct hda_codec *codec)
@@ -8337,8 +8686,8 @@ static void alc883_lenovo_101e_ispeaker_automute(struct hda_codec *codec)
8337 unsigned int present; 8686 unsigned int present;
8338 unsigned char bits; 8687 unsigned char bits;
8339 8688
8340 present = snd_hda_codec_read(codec, 0x14, 0, 8689 present = snd_hda_codec_read(codec, 0x14, 0, AC_VERB_GET_PIN_SENSE, 0)
8341 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 8690 & AC_PINSENSE_PRESENCE;
8342 bits = present ? HDA_AMP_MUTE : 0; 8691 bits = present ? HDA_AMP_MUTE : 0;
8343 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0, 8692 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8344 HDA_AMP_MUTE, bits); 8693 HDA_AMP_MUTE, bits);
@@ -8368,23 +8717,14 @@ static void alc883_lenovo_101e_unsol_event(struct hda_codec *codec,
8368} 8717}
8369 8718
8370/* toggle speaker-output according to the hp-jack state */ 8719/* toggle speaker-output according to the hp-jack state */
8371static void alc883_acer_aspire_automute(struct hda_codec *codec) 8720static void alc883_acer_aspire_init_hook(struct hda_codec *codec)
8372{ 8721{
8373 unsigned int present; 8722 struct alc_spec *spec = codec->spec;
8374
8375 present = snd_hda_codec_read(codec, 0x14, 0,
8376 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
8377 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8378 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8379 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8380 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8381}
8382 8723
8383static void alc883_acer_aspire_unsol_event(struct hda_codec *codec, 8724 spec->autocfg.hp_pins[0] = 0x14;
8384 unsigned int res) 8725 spec->autocfg.speaker_pins[0] = 0x15;
8385{ 8726 spec->autocfg.speaker_pins[1] = 0x16;
8386 if ((res >> 26) == ALC880_HP_EVENT) 8727 alc_automute_amp(codec);
8387 alc883_acer_aspire_automute(codec);
8388} 8728}
8389 8729
8390static struct hda_verb alc883_acer_eapd_verbs[] = { 8730static struct hda_verb alc883_acer_eapd_verbs[] = {
@@ -8405,75 +8745,39 @@ static struct hda_verb alc883_acer_eapd_verbs[] = {
8405 { } 8745 { }
8406}; 8746};
8407 8747
8408static void alc888_6st_dell_front_automute(struct hda_codec *codec) 8748static void alc888_6st_dell_init_hook(struct hda_codec *codec)
8409{ 8749{
8410 unsigned int present; 8750 struct alc_spec *spec = codec->spec;
8411
8412 present = snd_hda_codec_read(codec, 0x1b, 0,
8413 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
8414 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
8415 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8416 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8417 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8418 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8419 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8420 snd_hda_codec_amp_stereo(codec, 0x17, HDA_OUTPUT, 0,
8421 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8422}
8423 8751
8424static void alc888_6st_dell_unsol_event(struct hda_codec *codec, 8752 spec->autocfg.hp_pins[0] = 0x1b;
8425 unsigned int res) 8753 spec->autocfg.speaker_pins[0] = 0x14;
8426{ 8754 spec->autocfg.speaker_pins[1] = 0x15;
8427 switch (res >> 26) { 8755 spec->autocfg.speaker_pins[2] = 0x16;
8428 case ALC880_HP_EVENT: 8756 spec->autocfg.speaker_pins[3] = 0x17;
8429 /* printk(KERN_DEBUG "hp_event\n"); */ 8757 alc_automute_amp(codec);
8430 alc888_6st_dell_front_automute(codec);
8431 break;
8432 }
8433} 8758}
8434 8759
8435static void alc888_lenovo_sky_front_automute(struct hda_codec *codec) 8760static void alc888_lenovo_sky_init_hook(struct hda_codec *codec)
8436{ 8761{
8437 unsigned int mute; 8762 struct alc_spec *spec = codec->spec;
8438 unsigned int present;
8439 8763
8440 snd_hda_codec_read(codec, 0x1b, 0, AC_VERB_SET_PIN_SENSE, 0); 8764 spec->autocfg.hp_pins[0] = 0x1b;
8441 present = snd_hda_codec_read(codec, 0x1b, 0, 8765 spec->autocfg.speaker_pins[0] = 0x14;
8442 AC_VERB_GET_PIN_SENSE, 0); 8766 spec->autocfg.speaker_pins[1] = 0x15;
8443 present = (present & 0x80000000) != 0; 8767 spec->autocfg.speaker_pins[2] = 0x16;
8444 if (present) { 8768 spec->autocfg.speaker_pins[3] = 0x17;
8445 /* mute internal speaker */ 8769 spec->autocfg.speaker_pins[4] = 0x1a;
8446 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0, 8770 alc_automute_amp(codec);
8447 HDA_AMP_MUTE, HDA_AMP_MUTE);
8448 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8449 HDA_AMP_MUTE, HDA_AMP_MUTE);
8450 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8451 HDA_AMP_MUTE, HDA_AMP_MUTE);
8452 snd_hda_codec_amp_stereo(codec, 0x17, HDA_OUTPUT, 0,
8453 HDA_AMP_MUTE, HDA_AMP_MUTE);
8454 snd_hda_codec_amp_stereo(codec, 0x1a, HDA_OUTPUT, 0,
8455 HDA_AMP_MUTE, HDA_AMP_MUTE);
8456 } else {
8457 /* unmute internal speaker if necessary */
8458 mute = snd_hda_codec_amp_read(codec, 0x1b, 0, HDA_OUTPUT, 0);
8459 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
8460 HDA_AMP_MUTE, mute);
8461 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
8462 HDA_AMP_MUTE, mute);
8463 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8464 HDA_AMP_MUTE, mute);
8465 snd_hda_codec_amp_stereo(codec, 0x17, HDA_OUTPUT, 0,
8466 HDA_AMP_MUTE, mute);
8467 snd_hda_codec_amp_stereo(codec, 0x1a, HDA_OUTPUT, 0,
8468 HDA_AMP_MUTE, mute);
8469 }
8470} 8771}
8471 8772
8472static void alc883_lenovo_sky_unsol_event(struct hda_codec *codec, 8773static void alc883_vaiott_init_hook(struct hda_codec *codec)
8473 unsigned int res)
8474{ 8774{
8475 if ((res >> 26) == ALC880_HP_EVENT) 8775 struct alc_spec *spec = codec->spec;
8476 alc888_lenovo_sky_front_automute(codec); 8776
8777 spec->autocfg.hp_pins[0] = 0x15;
8778 spec->autocfg.speaker_pins[0] = 0x14;
8779 spec->autocfg.speaker_pins[1] = 0x17;
8780 alc_automute_amp(codec);
8477} 8781}
8478 8782
8479/* 8783/*
@@ -8561,39 +8865,33 @@ static void alc883_nb_mic_automute(struct hda_codec *codec)
8561 0x7000 | (0x01 << 8) | (present ? 0x80 : 0)); 8865 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
8562} 8866}
8563 8867
8564static void alc883_M90V_speaker_automute(struct hda_codec *codec) 8868static void alc883_M90V_init_hook(struct hda_codec *codec)
8565{ 8869{
8566 unsigned int present; 8870 struct alc_spec *spec = codec->spec;
8567 unsigned char bits;
8568 8871
8569 present = snd_hda_codec_read(codec, 0x1b, 0, 8872 spec->autocfg.hp_pins[0] = 0x1b;
8570 AC_VERB_GET_PIN_SENSE, 0) 8873 spec->autocfg.speaker_pins[0] = 0x14;
8571 & AC_PINSENSE_PRESENCE; 8874 spec->autocfg.speaker_pins[1] = 0x15;
8572 bits = present ? 0 : PIN_OUT; 8875 spec->autocfg.speaker_pins[2] = 0x16;
8573 snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 8876 alc_automute_pin(codec);
8574 bits);
8575 snd_hda_codec_write(codec, 0x15, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
8576 bits);
8577 snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
8578 bits);
8579} 8877}
8580 8878
8581static void alc883_mode2_unsol_event(struct hda_codec *codec, 8879static void alc883_mode2_unsol_event(struct hda_codec *codec,
8582 unsigned int res) 8880 unsigned int res)
8583{ 8881{
8584 switch (res >> 26) { 8882 switch (res >> 26) {
8585 case ALC880_HP_EVENT:
8586 alc883_M90V_speaker_automute(codec);
8587 break;
8588 case ALC880_MIC_EVENT: 8883 case ALC880_MIC_EVENT:
8589 alc883_nb_mic_automute(codec); 8884 alc883_nb_mic_automute(codec);
8590 break; 8885 break;
8886 default:
8887 alc_sku_unsol_event(codec, res);
8888 break;
8591 } 8889 }
8592} 8890}
8593 8891
8594static void alc883_mode2_inithook(struct hda_codec *codec) 8892static void alc883_mode2_inithook(struct hda_codec *codec)
8595{ 8893{
8596 alc883_M90V_speaker_automute(codec); 8894 alc883_M90V_init_hook(codec);
8597 alc883_nb_mic_automute(codec); 8895 alc883_nb_mic_automute(codec);
8598} 8896}
8599 8897
@@ -8610,32 +8908,49 @@ static struct hda_verb alc888_asus_eee1601_verbs[] = {
8610 { } /* end */ 8908 { } /* end */
8611}; 8909};
8612 8910
8613static void alc883_eee1601_speaker_automute(struct hda_codec *codec) 8911static void alc883_eee1601_inithook(struct hda_codec *codec)
8614{ 8912{
8615 unsigned int present; 8913 struct alc_spec *spec = codec->spec;
8616 unsigned char bits;
8617 8914
8618 present = snd_hda_codec_read(codec, 0x14, 0, 8915 spec->autocfg.hp_pins[0] = 0x14;
8619 AC_VERB_GET_PIN_SENSE, 0) 8916 spec->autocfg.speaker_pins[0] = 0x1b;
8620 & AC_PINSENSE_PRESENCE; 8917 alc_automute_pin(codec);
8621 bits = present ? 0 : PIN_OUT;
8622 snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
8623 bits);
8624} 8918}
8625 8919
8626static void alc883_eee1601_unsol_event(struct hda_codec *codec, 8920static struct hda_verb alc889A_mb31_verbs[] = {
8627 unsigned int res) 8921 /* Init rear pin (used as headphone output) */
8922 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4}, /* Apple Headphones */
8923 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* Connect to front */
8924 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8925 /* Init line pin (used as output in 4ch and 6ch mode) */
8926 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x02}, /* Connect to CLFE */
8927 /* Init line 2 pin (used as headphone out by default) */
8928 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Use as input */
8929 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, /* Mute output */
8930 { } /* end */
8931};
8932
8933/* Mute speakers according to the headphone jack state */
8934static void alc889A_mb31_automute(struct hda_codec *codec)
8628{ 8935{
8629 switch (res >> 26) { 8936 unsigned int present;
8630 case ALC880_HP_EVENT: 8937
8631 alc883_eee1601_speaker_automute(codec); 8938 /* Mute only in 2ch or 4ch mode */
8632 break; 8939 if (snd_hda_codec_read(codec, 0x15, 0, AC_VERB_GET_CONNECT_SEL, 0)
8940 == 0x00) {
8941 present = snd_hda_codec_read(codec, 0x15, 0,
8942 AC_VERB_GET_PIN_SENSE, 0) & AC_PINSENSE_PRESENCE;
8943 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
8944 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8945 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
8946 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
8633 } 8947 }
8634} 8948}
8635 8949
8636static void alc883_eee1601_inithook(struct hda_codec *codec) 8950static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
8637{ 8951{
8638 alc883_eee1601_speaker_automute(codec); 8952 if ((res >> 26) == ALC880_HP_EVENT)
8953 alc889A_mb31_automute(codec);
8639} 8954}
8640 8955
8641#ifdef CONFIG_SND_HDA_POWER_SAVE 8956#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -8659,9 +8974,11 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
8659 [ALC883_6ST_DIG] = "6stack-dig", 8974 [ALC883_6ST_DIG] = "6stack-dig",
8660 [ALC883_TARGA_DIG] = "targa-dig", 8975 [ALC883_TARGA_DIG] = "targa-dig",
8661 [ALC883_TARGA_2ch_DIG] = "targa-2ch-dig", 8976 [ALC883_TARGA_2ch_DIG] = "targa-2ch-dig",
8977 [ALC883_TARGA_8ch_DIG] = "targa-8ch-dig",
8662 [ALC883_ACER] = "acer", 8978 [ALC883_ACER] = "acer",
8663 [ALC883_ACER_ASPIRE] = "acer-aspire", 8979 [ALC883_ACER_ASPIRE] = "acer-aspire",
8664 [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g", 8980 [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g",
8981 [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g",
8665 [ALC883_MEDION] = "medion", 8982 [ALC883_MEDION] = "medion",
8666 [ALC883_MEDION_MD2] = "medion-md2", 8983 [ALC883_MEDION_MD2] = "medion-md2",
8667 [ALC883_LAPTOP_EAPD] = "laptop-eapd", 8984 [ALC883_LAPTOP_EAPD] = "laptop-eapd",
@@ -8678,6 +8995,8 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
8678 [ALC888_FUJITSU_XA3530] = "fujitsu-xa3530", 8995 [ALC888_FUJITSU_XA3530] = "fujitsu-xa3530",
8679 [ALC883_3ST_6ch_INTEL] = "3stack-6ch-intel", 8996 [ALC883_3ST_6ch_INTEL] = "3stack-6ch-intel",
8680 [ALC1200_ASUS_P5Q] = "asus-p5q", 8997 [ALC1200_ASUS_P5Q] = "asus-p5q",
8998 [ALC889A_MB31] = "mb31",
8999 [ALC883_SONY_VAIO_TT] = "sony-vaio-tt",
8681 [ALC883_AUTO] = "auto", 9000 [ALC883_AUTO] = "auto",
8682}; 9001};
8683 9002
@@ -8693,14 +9012,18 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8693 ALC888_ACER_ASPIRE_4930G), 9012 ALC888_ACER_ASPIRE_4930G),
8694 SND_PCI_QUIRK(0x1025, 0x013f, "Acer Aspire 5930G", 9013 SND_PCI_QUIRK(0x1025, 0x013f, "Acer Aspire 5930G",
8695 ALC888_ACER_ASPIRE_4930G), 9014 ALC888_ACER_ASPIRE_4930G),
9015 SND_PCI_QUIRK(0x1025, 0x0145, "Acer Aspire 8930G",
9016 ALC888_ACER_ASPIRE_8930G),
8696 SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO), 9017 SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
8697 SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO), 9018 SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
8698 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", 9019 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
8699 ALC888_ACER_ASPIRE_4930G), 9020 ALC888_ACER_ASPIRE_4930G),
8700 SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", 9021 SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
8701 ALC888_ACER_ASPIRE_4930G), 9022 ALC888_ACER_ASPIRE_4930G),
8702 /* default Acer */ 9023 /* default Acer -- disabled as it causes more problems.
8703 SND_PCI_QUIRK_VENDOR(0x1025, "Acer laptop", ALC883_ACER), 9024 * model=auto should work fine now
9025 */
9026 /* SND_PCI_QUIRK_VENDOR(0x1025, "Acer laptop", ALC883_ACER), */
8704 SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL), 9027 SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
8705 SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG), 9028 SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
8706 SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP), 9029 SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP),
@@ -8736,6 +9059,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8736 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG), 9059 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG),
8737 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG), 9060 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG),
8738 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG), 9061 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG),
9062 SND_PCI_QUIRK(0x1462, 0x6510, "MSI GX620", ALC883_TARGA_8ch_DIG),
8739 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC883_6ST_DIG), 9063 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC883_6ST_DIG),
8740 SND_PCI_QUIRK(0x1462, 0x7187, "MSI", ALC883_6ST_DIG), 9064 SND_PCI_QUIRK(0x1462, 0x7187, "MSI", ALC883_6ST_DIG),
8741 SND_PCI_QUIRK(0x1462, 0x7250, "MSI", ALC883_6ST_DIG), 9065 SND_PCI_QUIRK(0x1462, 0x7250, "MSI", ALC883_6ST_DIG),
@@ -8768,6 +9092,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8768 SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC), 9092 SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC),
8769 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL), 9093 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL),
8770 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), 9094 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
9095 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
8771 {} 9096 {}
8772}; 9097};
8773 9098
@@ -8848,7 +9173,7 @@ static struct alc_config_preset alc883_presets[] = {
8848 .need_dac_fix = 1, 9173 .need_dac_fix = 1,
8849 .input_mux = &alc883_capture_source, 9174 .input_mux = &alc883_capture_source,
8850 .unsol_event = alc883_tagra_unsol_event, 9175 .unsol_event = alc883_tagra_unsol_event,
8851 .init_hook = alc883_tagra_automute, 9176 .init_hook = alc883_tagra_init_hook,
8852 }, 9177 },
8853 [ALC883_TARGA_2ch_DIG] = { 9178 [ALC883_TARGA_2ch_DIG] = {
8854 .mixers = { alc883_tagra_2ch_mixer}, 9179 .mixers = { alc883_tagra_2ch_mixer},
@@ -8862,7 +9187,25 @@ static struct alc_config_preset alc883_presets[] = {
8862 .channel_mode = alc883_3ST_2ch_modes, 9187 .channel_mode = alc883_3ST_2ch_modes,
8863 .input_mux = &alc883_capture_source, 9188 .input_mux = &alc883_capture_source,
8864 .unsol_event = alc883_tagra_unsol_event, 9189 .unsol_event = alc883_tagra_unsol_event,
8865 .init_hook = alc883_tagra_automute, 9190 .init_hook = alc883_tagra_init_hook,
9191 },
9192 [ALC883_TARGA_8ch_DIG] = {
9193 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
9194 .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs,
9195 alc883_tagra_verbs },
9196 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9197 .dac_nids = alc883_dac_nids,
9198 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
9199 .adc_nids = alc883_adc_nids_rev,
9200 .capsrc_nids = alc883_capsrc_nids_rev,
9201 .dig_out_nid = ALC883_DIGOUT_NID,
9202 .dig_in_nid = ALC883_DIGIN_NID,
9203 .num_channel_mode = ARRAY_SIZE(alc883_4ST_8ch_modes),
9204 .channel_mode = alc883_4ST_8ch_modes,
9205 .need_dac_fix = 1,
9206 .input_mux = &alc883_capture_source,
9207 .unsol_event = alc883_tagra_unsol_event,
9208 .init_hook = alc883_tagra_init_hook,
8866 }, 9209 },
8867 [ALC883_ACER] = { 9210 [ALC883_ACER] = {
8868 .mixers = { alc883_base_mixer }, 9211 .mixers = { alc883_base_mixer },
@@ -8887,8 +9230,8 @@ static struct alc_config_preset alc883_presets[] = {
8887 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9230 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
8888 .channel_mode = alc883_3ST_2ch_modes, 9231 .channel_mode = alc883_3ST_2ch_modes,
8889 .input_mux = &alc883_capture_source, 9232 .input_mux = &alc883_capture_source,
8890 .unsol_event = alc883_acer_aspire_unsol_event, 9233 .unsol_event = alc_automute_amp_unsol_event,
8891 .init_hook = alc883_acer_aspire_automute, 9234 .init_hook = alc883_acer_aspire_init_hook,
8892 }, 9235 },
8893 [ALC888_ACER_ASPIRE_4930G] = { 9236 [ALC888_ACER_ASPIRE_4930G] = {
8894 .mixers = { alc888_base_mixer, 9237 .mixers = { alc888_base_mixer,
@@ -8907,8 +9250,29 @@ static struct alc_config_preset alc883_presets[] = {
8907 .num_mux_defs = 9250 .num_mux_defs =
8908 ARRAY_SIZE(alc888_2_capture_sources), 9251 ARRAY_SIZE(alc888_2_capture_sources),
8909 .input_mux = alc888_2_capture_sources, 9252 .input_mux = alc888_2_capture_sources,
8910 .unsol_event = alc888_acer_aspire_4930g_unsol_event, 9253 .unsol_event = alc_automute_amp_unsol_event,
8911 .init_hook = alc888_acer_aspire_4930g_automute, 9254 .init_hook = alc888_acer_aspire_4930g_init_hook,
9255 },
9256 [ALC888_ACER_ASPIRE_8930G] = {
9257 .mixers = { alc888_base_mixer,
9258 alc883_chmode_mixer },
9259 .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
9260 alc889_acer_aspire_8930g_verbs },
9261 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9262 .dac_nids = alc883_dac_nids,
9263 .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
9264 .adc_nids = alc889_adc_nids,
9265 .capsrc_nids = alc889_capsrc_nids,
9266 .dig_out_nid = ALC883_DIGOUT_NID,
9267 .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_modes),
9268 .channel_mode = alc883_3ST_6ch_modes,
9269 .need_dac_fix = 1,
9270 .const_channel_count = 6,
9271 .num_mux_defs =
9272 ARRAY_SIZE(alc889_capture_sources),
9273 .input_mux = alc889_capture_sources,
9274 .unsol_event = alc_automute_amp_unsol_event,
9275 .init_hook = alc889_acer_aspire_8930g_init_hook,
8912 }, 9276 },
8913 [ALC883_MEDION] = { 9277 [ALC883_MEDION] = {
8914 .mixers = { alc883_fivestack_mixer, 9278 .mixers = { alc883_fivestack_mixer,
@@ -8932,8 +9296,8 @@ static struct alc_config_preset alc883_presets[] = {
8932 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9296 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
8933 .channel_mode = alc883_3ST_2ch_modes, 9297 .channel_mode = alc883_3ST_2ch_modes,
8934 .input_mux = &alc883_capture_source, 9298 .input_mux = &alc883_capture_source,
8935 .unsol_event = alc883_medion_md2_unsol_event, 9299 .unsol_event = alc_automute_amp_unsol_event,
8936 .init_hook = alc883_medion_md2_automute, 9300 .init_hook = alc883_medion_md2_init_hook,
8937 }, 9301 },
8938 [ALC883_LAPTOP_EAPD] = { 9302 [ALC883_LAPTOP_EAPD] = {
8939 .mixers = { alc883_base_mixer }, 9303 .mixers = { alc883_base_mixer },
@@ -8954,7 +9318,7 @@ static struct alc_config_preset alc883_presets[] = {
8954 .channel_mode = alc883_3ST_2ch_modes, 9318 .channel_mode = alc883_3ST_2ch_modes,
8955 .input_mux = &alc883_capture_source, 9319 .input_mux = &alc883_capture_source,
8956 .unsol_event = alc883_clevo_m720_unsol_event, 9320 .unsol_event = alc883_clevo_m720_unsol_event,
8957 .init_hook = alc883_clevo_m720_automute, 9321 .init_hook = alc883_clevo_m720_init_hook,
8958 }, 9322 },
8959 [ALC883_LENOVO_101E_2ch] = { 9323 [ALC883_LENOVO_101E_2ch] = {
8960 .mixers = { alc883_lenovo_101e_2ch_mixer}, 9324 .mixers = { alc883_lenovo_101e_2ch_mixer},
@@ -8978,8 +9342,8 @@ static struct alc_config_preset alc883_presets[] = {
8978 .channel_mode = alc883_3ST_2ch_modes, 9342 .channel_mode = alc883_3ST_2ch_modes,
8979 .need_dac_fix = 1, 9343 .need_dac_fix = 1,
8980 .input_mux = &alc883_lenovo_nb0763_capture_source, 9344 .input_mux = &alc883_lenovo_nb0763_capture_source,
8981 .unsol_event = alc883_medion_md2_unsol_event, 9345 .unsol_event = alc_automute_amp_unsol_event,
8982 .init_hook = alc883_medion_md2_automute, 9346 .init_hook = alc883_medion_md2_init_hook,
8983 }, 9347 },
8984 [ALC888_LENOVO_MS7195_DIG] = { 9348 [ALC888_LENOVO_MS7195_DIG] = {
8985 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9349 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9003,8 +9367,8 @@ static struct alc_config_preset alc883_presets[] = {
9003 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9367 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9004 .channel_mode = alc883_3ST_2ch_modes, 9368 .channel_mode = alc883_3ST_2ch_modes,
9005 .input_mux = &alc883_capture_source, 9369 .input_mux = &alc883_capture_source,
9006 .unsol_event = alc883_haier_w66_unsol_event, 9370 .unsol_event = alc_automute_amp_unsol_event,
9007 .init_hook = alc883_haier_w66_automute, 9371 .init_hook = alc883_haier_w66_init_hook,
9008 }, 9372 },
9009 [ALC888_3ST_HP] = { 9373 [ALC888_3ST_HP] = {
9010 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9374 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9015,8 +9379,8 @@ static struct alc_config_preset alc883_presets[] = {
9015 .channel_mode = alc888_3st_hp_modes, 9379 .channel_mode = alc888_3st_hp_modes,
9016 .need_dac_fix = 1, 9380 .need_dac_fix = 1,
9017 .input_mux = &alc883_capture_source, 9381 .input_mux = &alc883_capture_source,
9018 .unsol_event = alc888_3st_hp_unsol_event, 9382 .unsol_event = alc_automute_amp_unsol_event,
9019 .init_hook = alc888_3st_hp_front_automute, 9383 .init_hook = alc888_3st_hp_init_hook,
9020 }, 9384 },
9021 [ALC888_6ST_DELL] = { 9385 [ALC888_6ST_DELL] = {
9022 .mixers = { alc883_base_mixer, alc883_chmode_mixer }, 9386 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
@@ -9028,8 +9392,8 @@ static struct alc_config_preset alc883_presets[] = {
9028 .num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes), 9392 .num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
9029 .channel_mode = alc883_sixstack_modes, 9393 .channel_mode = alc883_sixstack_modes,
9030 .input_mux = &alc883_capture_source, 9394 .input_mux = &alc883_capture_source,
9031 .unsol_event = alc888_6st_dell_unsol_event, 9395 .unsol_event = alc_automute_amp_unsol_event,
9032 .init_hook = alc888_6st_dell_front_automute, 9396 .init_hook = alc888_6st_dell_init_hook,
9033 }, 9397 },
9034 [ALC883_MITAC] = { 9398 [ALC883_MITAC] = {
9035 .mixers = { alc883_mitac_mixer }, 9399 .mixers = { alc883_mitac_mixer },
@@ -9039,8 +9403,8 @@ static struct alc_config_preset alc883_presets[] = {
9039 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9403 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9040 .channel_mode = alc883_3ST_2ch_modes, 9404 .channel_mode = alc883_3ST_2ch_modes,
9041 .input_mux = &alc883_capture_source, 9405 .input_mux = &alc883_capture_source,
9042 .unsol_event = alc883_mitac_unsol_event, 9406 .unsol_event = alc_automute_amp_unsol_event,
9043 .init_hook = alc883_mitac_automute, 9407 .init_hook = alc883_mitac_init_hook,
9044 }, 9408 },
9045 [ALC883_FUJITSU_PI2515] = { 9409 [ALC883_FUJITSU_PI2515] = {
9046 .mixers = { alc883_2ch_fujitsu_pi2515_mixer }, 9410 .mixers = { alc883_2ch_fujitsu_pi2515_mixer },
@@ -9052,8 +9416,8 @@ static struct alc_config_preset alc883_presets[] = {
9052 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9416 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9053 .channel_mode = alc883_3ST_2ch_modes, 9417 .channel_mode = alc883_3ST_2ch_modes,
9054 .input_mux = &alc883_fujitsu_pi2515_capture_source, 9418 .input_mux = &alc883_fujitsu_pi2515_capture_source,
9055 .unsol_event = alc883_2ch_fujitsu_pi2515_unsol_event, 9419 .unsol_event = alc_automute_amp_unsol_event,
9056 .init_hook = alc883_2ch_fujitsu_pi2515_automute, 9420 .init_hook = alc883_2ch_fujitsu_pi2515_init_hook,
9057 }, 9421 },
9058 [ALC888_FUJITSU_XA3530] = { 9422 [ALC888_FUJITSU_XA3530] = {
9059 .mixers = { alc888_base_mixer, alc883_chmode_mixer }, 9423 .mixers = { alc888_base_mixer, alc883_chmode_mixer },
@@ -9070,8 +9434,8 @@ static struct alc_config_preset alc883_presets[] = {
9070 .num_mux_defs = 9434 .num_mux_defs =
9071 ARRAY_SIZE(alc888_2_capture_sources), 9435 ARRAY_SIZE(alc888_2_capture_sources),
9072 .input_mux = alc888_2_capture_sources, 9436 .input_mux = alc888_2_capture_sources,
9073 .unsol_event = alc888_fujitsu_xa3530_unsol_event, 9437 .unsol_event = alc_automute_amp_unsol_event,
9074 .init_hook = alc888_fujitsu_xa3530_automute, 9438 .init_hook = alc888_fujitsu_xa3530_init_hook,
9075 }, 9439 },
9076 [ALC888_LENOVO_SKY] = { 9440 [ALC888_LENOVO_SKY] = {
9077 .mixers = { alc888_lenovo_sky_mixer, alc883_chmode_mixer }, 9441 .mixers = { alc888_lenovo_sky_mixer, alc883_chmode_mixer },
@@ -9083,8 +9447,8 @@ static struct alc_config_preset alc883_presets[] = {
9083 .channel_mode = alc883_sixstack_modes, 9447 .channel_mode = alc883_sixstack_modes,
9084 .need_dac_fix = 1, 9448 .need_dac_fix = 1,
9085 .input_mux = &alc883_lenovo_sky_capture_source, 9449 .input_mux = &alc883_lenovo_sky_capture_source,
9086 .unsol_event = alc883_lenovo_sky_unsol_event, 9450 .unsol_event = alc_automute_amp_unsol_event,
9087 .init_hook = alc888_lenovo_sky_front_automute, 9451 .init_hook = alc888_lenovo_sky_init_hook,
9088 }, 9452 },
9089 [ALC888_ASUS_M90V] = { 9453 [ALC888_ASUS_M90V] = {
9090 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9454 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9112,7 +9476,7 @@ static struct alc_config_preset alc883_presets[] = {
9112 .channel_mode = alc883_3ST_2ch_modes, 9476 .channel_mode = alc883_3ST_2ch_modes,
9113 .need_dac_fix = 1, 9477 .need_dac_fix = 1,
9114 .input_mux = &alc883_asus_eee1601_capture_source, 9478 .input_mux = &alc883_asus_eee1601_capture_source,
9115 .unsol_event = alc883_eee1601_unsol_event, 9479 .unsol_event = alc_sku_unsol_event,
9116 .init_hook = alc883_eee1601_inithook, 9480 .init_hook = alc883_eee1601_inithook,
9117 }, 9481 },
9118 [ALC1200_ASUS_P5Q] = { 9482 [ALC1200_ASUS_P5Q] = {
@@ -9127,6 +9491,32 @@ static struct alc_config_preset alc883_presets[] = {
9127 .channel_mode = alc883_sixstack_modes, 9491 .channel_mode = alc883_sixstack_modes,
9128 .input_mux = &alc883_capture_source, 9492 .input_mux = &alc883_capture_source,
9129 }, 9493 },
9494 [ALC889A_MB31] = {
9495 .mixers = { alc889A_mb31_mixer, alc883_chmode_mixer},
9496 .init_verbs = { alc883_init_verbs, alc889A_mb31_verbs,
9497 alc880_gpio1_init_verbs },
9498 .adc_nids = alc883_adc_nids,
9499 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
9500 .dac_nids = alc883_dac_nids,
9501 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9502 .channel_mode = alc889A_mb31_6ch_modes,
9503 .num_channel_mode = ARRAY_SIZE(alc889A_mb31_6ch_modes),
9504 .input_mux = &alc889A_mb31_capture_source,
9505 .dig_out_nid = ALC883_DIGOUT_NID,
9506 .unsol_event = alc889A_mb31_unsol_event,
9507 .init_hook = alc889A_mb31_automute,
9508 },
9509 [ALC883_SONY_VAIO_TT] = {
9510 .mixers = { alc883_vaiott_mixer },
9511 .init_verbs = { alc883_init_verbs, alc883_vaiott_verbs },
9512 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9513 .dac_nids = alc883_dac_nids,
9514 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9515 .channel_mode = alc883_3ST_2ch_modes,
9516 .input_mux = &alc883_capture_source,
9517 .unsol_event = alc_automute_amp_unsol_event,
9518 .init_hook = alc883_vaiott_init_hook,
9519 },
9130}; 9520};
9131 9521
9132 9522
@@ -9155,7 +9545,6 @@ static void alc883_auto_init_multi_out(struct hda_codec *codec)
9155 struct alc_spec *spec = codec->spec; 9545 struct alc_spec *spec = codec->spec;
9156 int i; 9546 int i;
9157 9547
9158 alc_subsystem_id(codec, 0x15, 0x1b, 0x14);
9159 for (i = 0; i <= HDA_SIDE; i++) { 9548 for (i = 0; i <= HDA_SIDE; i++) {
9160 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 9549 hda_nid_t nid = spec->autocfg.line_out_pins[i];
9161 int pin_type = get_pin_type(spec->autocfg.line_out_type); 9550 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -9273,10 +9662,18 @@ static int patch_alc883(struct hda_codec *codec)
9273 board_config = snd_hda_check_board_config(codec, ALC883_MODEL_LAST, 9662 board_config = snd_hda_check_board_config(codec, ALC883_MODEL_LAST,
9274 alc883_models, 9663 alc883_models,
9275 alc883_cfg_tbl); 9664 alc883_cfg_tbl);
9276 if (board_config < 0) { 9665 if (board_config < 0 || board_config >= ALC883_MODEL_LAST) {
9277 printk(KERN_INFO "hda_codec: Unknown model for ALC883, " 9666 /* Pick up systems that don't supply PCI SSID */
9278 "trying auto-probe from BIOS...\n"); 9667 switch (codec->subsystem_id) {
9279 board_config = ALC883_AUTO; 9668 case 0x106b3600: /* Macbook 3.1 */
9669 board_config = ALC889A_MB31;
9670 break;
9671 default:
9672 printk(KERN_INFO
9673 "hda_codec: Unknown model for %s, trying "
9674 "auto-probe from BIOS...\n", codec->chip_name);
9675 board_config = ALC883_AUTO;
9676 }
9280 } 9677 }
9281 9678
9282 if (board_config == ALC883_AUTO) { 9679 if (board_config == ALC883_AUTO) {
@@ -9304,13 +9701,6 @@ static int patch_alc883(struct hda_codec *codec)
9304 9701
9305 switch (codec->vendor_id) { 9702 switch (codec->vendor_id) {
9306 case 0x10ec0888: 9703 case 0x10ec0888:
9307 if (codec->revision_id == 0x100101) {
9308 spec->stream_name_analog = "ALC1200 Analog";
9309 spec->stream_name_digital = "ALC1200 Digital";
9310 } else {
9311 spec->stream_name_analog = "ALC888 Analog";
9312 spec->stream_name_digital = "ALC888 Digital";
9313 }
9314 if (!spec->num_adc_nids) { 9704 if (!spec->num_adc_nids) {
9315 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids); 9705 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
9316 spec->adc_nids = alc883_adc_nids; 9706 spec->adc_nids = alc883_adc_nids;
@@ -9318,10 +9708,9 @@ static int patch_alc883(struct hda_codec *codec)
9318 if (!spec->capsrc_nids) 9708 if (!spec->capsrc_nids)
9319 spec->capsrc_nids = alc883_capsrc_nids; 9709 spec->capsrc_nids = alc883_capsrc_nids;
9320 spec->capture_style = CAPT_MIX; /* matrix-style capture */ 9710 spec->capture_style = CAPT_MIX; /* matrix-style capture */
9711 spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
9321 break; 9712 break;
9322 case 0x10ec0889: 9713 case 0x10ec0889:
9323 spec->stream_name_analog = "ALC889 Analog";
9324 spec->stream_name_digital = "ALC889 Digital";
9325 if (!spec->num_adc_nids) { 9714 if (!spec->num_adc_nids) {
9326 spec->num_adc_nids = ARRAY_SIZE(alc889_adc_nids); 9715 spec->num_adc_nids = ARRAY_SIZE(alc889_adc_nids);
9327 spec->adc_nids = alc889_adc_nids; 9716 spec->adc_nids = alc889_adc_nids;
@@ -9332,8 +9721,6 @@ static int patch_alc883(struct hda_codec *codec)
9332 capture */ 9721 capture */
9333 break; 9722 break;
9334 default: 9723 default:
9335 spec->stream_name_analog = "ALC883 Analog";
9336 spec->stream_name_digital = "ALC883 Digital";
9337 if (!spec->num_adc_nids) { 9724 if (!spec->num_adc_nids) {
9338 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids); 9725 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
9339 spec->adc_nids = alc883_adc_nids; 9726 spec->adc_nids = alc883_adc_nids;
@@ -9413,24 +9800,6 @@ static struct snd_kcontrol_new alc262_base_mixer[] = {
9413 { } /* end */ 9800 { } /* end */
9414}; 9801};
9415 9802
9416static struct snd_kcontrol_new alc262_hippo1_mixer[] = {
9417 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
9418 HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
9419 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
9420 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
9421 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
9422 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
9423 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
9424 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
9425 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
9426 HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
9427 HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
9428 HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
9429 /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/
9430 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
9431 { } /* end */
9432};
9433
9434/* update HP, line and mono-out pins according to the master switch */ 9803/* update HP, line and mono-out pins according to the master switch */
9435static void alc262_hp_master_update(struct hda_codec *codec) 9804static void alc262_hp_master_update(struct hda_codec *codec)
9436{ 9805{
@@ -9486,14 +9855,7 @@ static void alc262_hp_wildwest_unsol_event(struct hda_codec *codec,
9486 alc262_hp_wildwest_automute(codec); 9855 alc262_hp_wildwest_automute(codec);
9487} 9856}
9488 9857
9489static int alc262_hp_master_sw_get(struct snd_kcontrol *kcontrol, 9858#define alc262_hp_master_sw_get alc260_hp_master_sw_get
9490 struct snd_ctl_elem_value *ucontrol)
9491{
9492 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
9493 struct alc_spec *spec = codec->spec;
9494 *ucontrol->value.integer.value = spec->master_sw;
9495 return 0;
9496}
9497 9859
9498static int alc262_hp_master_sw_put(struct snd_kcontrol *kcontrol, 9860static int alc262_hp_master_sw_put(struct snd_kcontrol *kcontrol,
9499 struct snd_ctl_elem_value *ucontrol) 9861 struct snd_ctl_elem_value *ucontrol)
@@ -9509,14 +9871,17 @@ static int alc262_hp_master_sw_put(struct snd_kcontrol *kcontrol,
9509 return 1; 9871 return 1;
9510} 9872}
9511 9873
9874#define ALC262_HP_MASTER_SWITCH \
9875 { \
9876 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
9877 .name = "Master Playback Switch", \
9878 .info = snd_ctl_boolean_mono_info, \
9879 .get = alc262_hp_master_sw_get, \
9880 .put = alc262_hp_master_sw_put, \
9881 }
9882
9512static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = { 9883static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = {
9513 { 9884 ALC262_HP_MASTER_SWITCH,
9514 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
9515 .name = "Master Playback Switch",
9516 .info = snd_ctl_boolean_mono_info,
9517 .get = alc262_hp_master_sw_get,
9518 .put = alc262_hp_master_sw_put,
9519 },
9520 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 9885 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
9521 HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT), 9886 HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT),
9522 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), 9887 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -9540,13 +9905,7 @@ static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = {
9540}; 9905};
9541 9906
9542static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = { 9907static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = {
9543 { 9908 ALC262_HP_MASTER_SWITCH,
9544 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
9545 .name = "Master Playback Switch",
9546 .info = snd_ctl_boolean_mono_info,
9547 .get = alc262_hp_master_sw_get,
9548 .put = alc262_hp_master_sw_put,
9549 },
9550 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 9909 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
9551 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), 9910 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
9552 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT), 9911 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
@@ -9573,32 +9932,13 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
9573}; 9932};
9574 9933
9575/* mute/unmute internal speaker according to the hp jack and mute state */ 9934/* mute/unmute internal speaker according to the hp jack and mute state */
9576static void alc262_hp_t5735_automute(struct hda_codec *codec, int force) 9935static void alc262_hp_t5735_init_hook(struct hda_codec *codec)
9577{ 9936{
9578 struct alc_spec *spec = codec->spec; 9937 struct alc_spec *spec = codec->spec;
9579 9938
9580 if (force || !spec->sense_updated) { 9939 spec->autocfg.hp_pins[0] = 0x15;
9581 unsigned int present; 9940 spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */
9582 present = snd_hda_codec_read(codec, 0x15, 0, 9941 alc_automute_amp(codec);
9583 AC_VERB_GET_PIN_SENSE, 0);
9584 spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0;
9585 spec->sense_updated = 1;
9586 }
9587 snd_hda_codec_amp_stereo(codec, 0x0c, HDA_OUTPUT, 0, HDA_AMP_MUTE,
9588 spec->jack_present ? HDA_AMP_MUTE : 0);
9589}
9590
9591static void alc262_hp_t5735_unsol_event(struct hda_codec *codec,
9592 unsigned int res)
9593{
9594 if ((res >> 26) != ALC880_HP_EVENT)
9595 return;
9596 alc262_hp_t5735_automute(codec, 1);
9597}
9598
9599static void alc262_hp_t5735_init_hook(struct hda_codec *codec)
9600{
9601 alc262_hp_t5735_automute(codec, 1);
9602} 9942}
9603 9943
9604static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = { 9944static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = {
@@ -9651,46 +9991,132 @@ static struct hda_input_mux alc262_hp_rp5700_capture_source = {
9651 }, 9991 },
9652}; 9992};
9653 9993
9654/* bind hp and internal speaker mute (with plug check) */ 9994/* bind hp and internal speaker mute (with plug check) as master switch */
9655static int alc262_sony_master_sw_put(struct snd_kcontrol *kcontrol, 9995static void alc262_hippo_master_update(struct hda_codec *codec)
9656 struct snd_ctl_elem_value *ucontrol)
9657{ 9996{
9658 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 9997 struct alc_spec *spec = codec->spec;
9659 long *valp = ucontrol->value.integer.value; 9998 hda_nid_t hp_nid = spec->autocfg.hp_pins[0];
9660 int change; 9999 hda_nid_t line_nid = spec->autocfg.line_out_pins[0];
10000 hda_nid_t speaker_nid = spec->autocfg.speaker_pins[0];
10001 unsigned int mute;
9661 10002
9662 /* change hp mute */ 10003 /* HP */
9663 change = snd_hda_codec_amp_update(codec, 0x15, 0, HDA_OUTPUT, 0, 10004 mute = spec->master_sw ? 0 : HDA_AMP_MUTE;
9664 HDA_AMP_MUTE, 10005 snd_hda_codec_amp_stereo(codec, hp_nid, HDA_OUTPUT, 0,
9665 valp[0] ? 0 : HDA_AMP_MUTE); 10006 HDA_AMP_MUTE, mute);
9666 change |= snd_hda_codec_amp_update(codec, 0x15, 1, HDA_OUTPUT, 0, 10007 /* mute internal speaker per jack sense */
9667 HDA_AMP_MUTE, 10008 if (spec->jack_present)
9668 valp[1] ? 0 : HDA_AMP_MUTE); 10009 mute = HDA_AMP_MUTE;
9669 if (change) { 10010 if (line_nid)
9670 /* change speaker according to HP jack state */ 10011 snd_hda_codec_amp_stereo(codec, line_nid, HDA_OUTPUT, 0,
9671 struct alc_spec *spec = codec->spec;
9672 unsigned int mute;
9673 if (spec->jack_present)
9674 mute = HDA_AMP_MUTE;
9675 else
9676 mute = snd_hda_codec_amp_read(codec, 0x15, 0,
9677 HDA_OUTPUT, 0);
9678 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
9679 HDA_AMP_MUTE, mute); 10012 HDA_AMP_MUTE, mute);
10013 if (speaker_nid && speaker_nid != line_nid)
10014 snd_hda_codec_amp_stereo(codec, speaker_nid, HDA_OUTPUT, 0,
10015 HDA_AMP_MUTE, mute);
10016}
10017
10018#define alc262_hippo_master_sw_get alc262_hp_master_sw_get
10019
10020static int alc262_hippo_master_sw_put(struct snd_kcontrol *kcontrol,
10021 struct snd_ctl_elem_value *ucontrol)
10022{
10023 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
10024 struct alc_spec *spec = codec->spec;
10025 int val = !!*ucontrol->value.integer.value;
10026
10027 if (val == spec->master_sw)
10028 return 0;
10029 spec->master_sw = val;
10030 alc262_hippo_master_update(codec);
10031 return 1;
10032}
10033
10034#define ALC262_HIPPO_MASTER_SWITCH \
10035 { \
10036 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
10037 .name = "Master Playback Switch", \
10038 .info = snd_ctl_boolean_mono_info, \
10039 .get = alc262_hippo_master_sw_get, \
10040 .put = alc262_hippo_master_sw_put, \
9680 } 10041 }
9681 return change; 10042
10043static struct snd_kcontrol_new alc262_hippo_mixer[] = {
10044 ALC262_HIPPO_MASTER_SWITCH,
10045 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
10046 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
10047 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
10048 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
10049 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
10050 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
10051 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
10052 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
10053 HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
10054 HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
10055 HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
10056 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
10057 { } /* end */
10058};
10059
10060static struct snd_kcontrol_new alc262_hippo1_mixer[] = {
10061 HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
10062 ALC262_HIPPO_MASTER_SWITCH,
10063 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
10064 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
10065 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
10066 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
10067 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
10068 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
10069 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
10070 HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
10071 HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
10072 HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
10073 { } /* end */
10074};
10075
10076/* mute/unmute internal speaker according to the hp jack and mute state */
10077static void alc262_hippo_automute(struct hda_codec *codec)
10078{
10079 struct alc_spec *spec = codec->spec;
10080 hda_nid_t hp_nid = spec->autocfg.hp_pins[0];
10081 unsigned int present;
10082
10083 /* need to execute and sync at first */
10084 snd_hda_codec_read(codec, hp_nid, 0, AC_VERB_SET_PIN_SENSE, 0);
10085 present = snd_hda_codec_read(codec, hp_nid, 0,
10086 AC_VERB_GET_PIN_SENSE, 0);
10087 spec->jack_present = (present & 0x80000000) != 0;
10088 alc262_hippo_master_update(codec);
10089}
10090
10091static void alc262_hippo_unsol_event(struct hda_codec *codec, unsigned int res)
10092{
10093 if ((res >> 26) != ALC880_HP_EVENT)
10094 return;
10095 alc262_hippo_automute(codec);
10096}
10097
10098static void alc262_hippo_init_hook(struct hda_codec *codec)
10099{
10100 struct alc_spec *spec = codec->spec;
10101
10102 spec->autocfg.hp_pins[0] = 0x15;
10103 spec->autocfg.speaker_pins[0] = 0x14;
10104 alc262_hippo_automute(codec);
10105}
10106
10107static void alc262_hippo1_init_hook(struct hda_codec *codec)
10108{
10109 struct alc_spec *spec = codec->spec;
10110
10111 spec->autocfg.hp_pins[0] = 0x1b;
10112 spec->autocfg.speaker_pins[0] = 0x14;
10113 alc262_hippo_automute(codec);
9682} 10114}
9683 10115
10116
9684static struct snd_kcontrol_new alc262_sony_mixer[] = { 10117static struct snd_kcontrol_new alc262_sony_mixer[] = {
9685 HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 10118 HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
9686 { 10119 ALC262_HIPPO_MASTER_SWITCH,
9687 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
9688 .name = "Master Playback Switch",
9689 .info = snd_hda_mixer_amp_switch_info,
9690 .get = snd_hda_mixer_amp_switch_get,
9691 .put = alc262_sony_master_sw_put,
9692 .private_value = HDA_COMPOSE_AMP_VAL(0x15, 3, 0, HDA_OUTPUT),
9693 },
9694 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), 10120 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
9695 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), 10121 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
9696 HDA_CODEC_VOLUME("ATAPI Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), 10122 HDA_CODEC_VOLUME("ATAPI Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
@@ -9699,8 +10125,8 @@ static struct snd_kcontrol_new alc262_sony_mixer[] = {
9699}; 10125};
9700 10126
9701static struct snd_kcontrol_new alc262_benq_t31_mixer[] = { 10127static struct snd_kcontrol_new alc262_benq_t31_mixer[] = {
9702 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 10128 HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
9703 HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), 10129 ALC262_HIPPO_MASTER_SWITCH,
9704 HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT), 10130 HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
9705 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), 10131 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
9706 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), 10132 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
@@ -9741,34 +10167,15 @@ static struct hda_verb alc262_tyan_verbs[] = {
9741}; 10167};
9742 10168
9743/* unsolicited event for HP jack sensing */ 10169/* unsolicited event for HP jack sensing */
9744static void alc262_tyan_automute(struct hda_codec *codec) 10170static void alc262_tyan_init_hook(struct hda_codec *codec)
9745{ 10171{
9746 unsigned int mute; 10172 struct alc_spec *spec = codec->spec;
9747 unsigned int present;
9748 10173
9749 snd_hda_codec_read(codec, 0x1b, 0, AC_VERB_SET_PIN_SENSE, 0); 10174 spec->autocfg.hp_pins[0] = 0x1b;
9750 present = snd_hda_codec_read(codec, 0x1b, 0, 10175 spec->autocfg.speaker_pins[0] = 0x15;
9751 AC_VERB_GET_PIN_SENSE, 0); 10176 alc_automute_amp(codec);
9752 present = (present & 0x80000000) != 0;
9753 if (present) {
9754 /* mute line output on ATX panel */
9755 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
9756 HDA_AMP_MUTE, HDA_AMP_MUTE);
9757 } else {
9758 /* unmute line output if necessary */
9759 mute = snd_hda_codec_amp_read(codec, 0x1b, 0, HDA_OUTPUT, 0);
9760 snd_hda_codec_amp_stereo(codec, 0x15, HDA_OUTPUT, 0,
9761 HDA_AMP_MUTE, mute);
9762 }
9763} 10177}
9764 10178
9765static void alc262_tyan_unsol_event(struct hda_codec *codec,
9766 unsigned int res)
9767{
9768 if ((res >> 26) != ALC880_HP_EVENT)
9769 return;
9770 alc262_tyan_automute(codec);
9771}
9772 10179
9773#define alc262_capture_mixer alc882_capture_mixer 10180#define alc262_capture_mixer alc882_capture_mixer
9774#define alc262_capture_alt_mixer alc882_capture_alt_mixer 10181#define alc262_capture_alt_mixer alc882_capture_alt_mixer
@@ -9923,99 +10330,25 @@ static void alc262_dmic_automute(struct hda_codec *codec)
9923 AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x09); 10330 AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x09);
9924} 10331}
9925 10332
9926/* toggle speaker-output according to the hp-jack state */
9927static void alc262_toshiba_s06_speaker_automute(struct hda_codec *codec)
9928{
9929 unsigned int present;
9930 unsigned char bits;
9931
9932 present = snd_hda_codec_read(codec, 0x15, 0,
9933 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
9934 bits = present ? 0 : PIN_OUT;
9935 snd_hda_codec_write(codec, 0x14, 0,
9936 AC_VERB_SET_PIN_WIDGET_CONTROL, bits);
9937}
9938
9939
9940 10333
9941/* unsolicited event for HP jack sensing */ 10334/* unsolicited event for HP jack sensing */
9942static void alc262_toshiba_s06_unsol_event(struct hda_codec *codec, 10335static void alc262_toshiba_s06_unsol_event(struct hda_codec *codec,
9943 unsigned int res) 10336 unsigned int res)
9944{ 10337{
9945 if ((res >> 26) == ALC880_HP_EVENT)
9946 alc262_toshiba_s06_speaker_automute(codec);
9947 if ((res >> 26) == ALC880_MIC_EVENT) 10338 if ((res >> 26) == ALC880_MIC_EVENT)
9948 alc262_dmic_automute(codec); 10339 alc262_dmic_automute(codec);
9949 10340 else
10341 alc_sku_unsol_event(codec, res);
9950} 10342}
9951 10343
9952static void alc262_toshiba_s06_init_hook(struct hda_codec *codec) 10344static void alc262_toshiba_s06_init_hook(struct hda_codec *codec)
9953{ 10345{
9954 alc262_toshiba_s06_speaker_automute(codec);
9955 alc262_dmic_automute(codec);
9956}
9957
9958/* mute/unmute internal speaker according to the hp jack and mute state */
9959static void alc262_hippo_automute(struct hda_codec *codec)
9960{
9961 struct alc_spec *spec = codec->spec; 10346 struct alc_spec *spec = codec->spec;
9962 unsigned int mute;
9963 unsigned int present;
9964
9965 /* need to execute and sync at first */
9966 snd_hda_codec_read(codec, 0x15, 0, AC_VERB_SET_PIN_SENSE, 0);
9967 present = snd_hda_codec_read(codec, 0x15, 0,
9968 AC_VERB_GET_PIN_SENSE, 0);
9969 spec->jack_present = (present & 0x80000000) != 0;
9970 if (spec->jack_present) {
9971 /* mute internal speaker */
9972 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
9973 HDA_AMP_MUTE, HDA_AMP_MUTE);
9974 } else {
9975 /* unmute internal speaker if necessary */
9976 mute = snd_hda_codec_amp_read(codec, 0x15, 0, HDA_OUTPUT, 0);
9977 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
9978 HDA_AMP_MUTE, mute);
9979 }
9980}
9981
9982/* unsolicited event for HP jack sensing */
9983static void alc262_hippo_unsol_event(struct hda_codec *codec,
9984 unsigned int res)
9985{
9986 if ((res >> 26) != ALC880_HP_EVENT)
9987 return;
9988 alc262_hippo_automute(codec);
9989}
9990
9991static void alc262_hippo1_automute(struct hda_codec *codec)
9992{
9993 unsigned int mute;
9994 unsigned int present;
9995 10347
9996 snd_hda_codec_read(codec, 0x1b, 0, AC_VERB_SET_PIN_SENSE, 0); 10348 spec->autocfg.hp_pins[0] = 0x15;
9997 present = snd_hda_codec_read(codec, 0x1b, 0, 10349 spec->autocfg.speaker_pins[0] = 0x14;
9998 AC_VERB_GET_PIN_SENSE, 0); 10350 alc_automute_pin(codec);
9999 present = (present & 0x80000000) != 0; 10351 alc262_dmic_automute(codec);
10000 if (present) {
10001 /* mute internal speaker */
10002 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
10003 HDA_AMP_MUTE, HDA_AMP_MUTE);
10004 } else {
10005 /* unmute internal speaker if necessary */
10006 mute = snd_hda_codec_amp_read(codec, 0x1b, 0, HDA_OUTPUT, 0);
10007 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
10008 HDA_AMP_MUTE, mute);
10009 }
10010}
10011
10012/* unsolicited event for HP jack sensing */
10013static void alc262_hippo1_unsol_event(struct hda_codec *codec,
10014 unsigned int res)
10015{
10016 if ((res >> 26) != ALC880_HP_EVENT)
10017 return;
10018 alc262_hippo1_automute(codec);
10019} 10352}
10020 10353
10021/* 10354/*
@@ -10285,14 +10618,7 @@ static struct snd_kcontrol_new alc262_lenovo_3000_mixer[] = {
10285 10618
10286static struct snd_kcontrol_new alc262_toshiba_rx1_mixer[] = { 10619static struct snd_kcontrol_new alc262_toshiba_rx1_mixer[] = {
10287 HDA_BIND_VOL("Master Playback Volume", &alc262_fujitsu_bind_master_vol), 10620 HDA_BIND_VOL("Master Playback Volume", &alc262_fujitsu_bind_master_vol),
10288 { 10621 ALC262_HIPPO_MASTER_SWITCH,
10289 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
10290 .name = "Master Playback Switch",
10291 .info = snd_hda_mixer_amp_switch_info,
10292 .get = snd_hda_mixer_amp_switch_get,
10293 .put = alc262_sony_master_sw_put,
10294 .private_value = HDA_COMPOSE_AMP_VAL(0x15, 3, 0, HDA_OUTPUT),
10295 },
10296 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), 10622 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
10297 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), 10623 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
10298 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), 10624 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
@@ -10639,31 +10965,46 @@ static struct hda_verb alc262_HP_BPC_init_verbs[] = {
10639 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20}, 10965 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
10640 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20}, 10966 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20},
10641 10967
10642 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7023 }, 10968 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10643 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 }, 10969 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10644 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 }, 10970 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10645 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0x7023 }, 10971 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10646 {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 }, 10972 {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10647 {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 }, 10973 {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000 },
10648 10974
10649 10975
10650 /* FIXME: use matrix-type input source selection */ 10976 /* FIXME: use matrix-type input source selection */
10651 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */ 10977 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 0b, 12 */
10652 /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */ 10978 /* Input mixer1: only unmute Mic */
10653 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))}, 10979 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
10654 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x03 << 8))}, 10980 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x01 << 8))},
10655 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x02 << 8))}, 10981 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
10656 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x04 << 8))}, 10982 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
10983 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
10984 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x05 << 8))},
10985 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x06 << 8))},
10986 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x07 << 8))},
10987 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x08 << 8))},
10657 /* Input mixer2 */ 10988 /* Input mixer2 */
10658 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))}, 10989 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
10659 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x03 << 8))}, 10990 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x01 << 8))},
10660 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x02 << 8))}, 10991 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
10661 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x04 << 8))}, 10992 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
10993 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
10994 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x05 << 8))},
10995 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x06 << 8))},
10996 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x07 << 8))},
10997 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x08 << 8))},
10662 /* Input mixer3 */ 10998 /* Input mixer3 */
10663 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))}, 10999 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
10664 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x03 << 8))}, 11000 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x01 << 8))},
10665 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x02 << 8))}, 11001 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
10666 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x04 << 8))}, 11002 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
11003 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
11004 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x05 << 8))},
11005 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x06 << 8))},
11006 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x07 << 8))},
11007 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x08 << 8))},
10667 11008
10668 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, 11009 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
10669 11010
@@ -10843,6 +11184,8 @@ static int alc262_parse_auto_config(struct hda_codec *codec)
10843 if (err < 0) 11184 if (err < 0)
10844 return err; 11185 return err;
10845 11186
11187 alc_ssid_check(codec, 0x15, 0x14, 0x1b);
11188
10846 return 1; 11189 return 1;
10847} 11190}
10848 11191
@@ -10945,7 +11288,7 @@ static struct alc_config_preset alc262_presets[] = {
10945 .input_mux = &alc262_capture_source, 11288 .input_mux = &alc262_capture_source,
10946 }, 11289 },
10947 [ALC262_HIPPO] = { 11290 [ALC262_HIPPO] = {
10948 .mixers = { alc262_base_mixer }, 11291 .mixers = { alc262_hippo_mixer },
10949 .init_verbs = { alc262_init_verbs, alc262_hippo_unsol_verbs}, 11292 .init_verbs = { alc262_init_verbs, alc262_hippo_unsol_verbs},
10950 .num_dacs = ARRAY_SIZE(alc262_dac_nids), 11293 .num_dacs = ARRAY_SIZE(alc262_dac_nids),
10951 .dac_nids = alc262_dac_nids, 11294 .dac_nids = alc262_dac_nids,
@@ -10955,7 +11298,7 @@ static struct alc_config_preset alc262_presets[] = {
10955 .channel_mode = alc262_modes, 11298 .channel_mode = alc262_modes,
10956 .input_mux = &alc262_capture_source, 11299 .input_mux = &alc262_capture_source,
10957 .unsol_event = alc262_hippo_unsol_event, 11300 .unsol_event = alc262_hippo_unsol_event,
10958 .init_hook = alc262_hippo_automute, 11301 .init_hook = alc262_hippo_init_hook,
10959 }, 11302 },
10960 [ALC262_HIPPO_1] = { 11303 [ALC262_HIPPO_1] = {
10961 .mixers = { alc262_hippo1_mixer }, 11304 .mixers = { alc262_hippo1_mixer },
@@ -10967,8 +11310,8 @@ static struct alc_config_preset alc262_presets[] = {
10967 .num_channel_mode = ARRAY_SIZE(alc262_modes), 11310 .num_channel_mode = ARRAY_SIZE(alc262_modes),
10968 .channel_mode = alc262_modes, 11311 .channel_mode = alc262_modes,
10969 .input_mux = &alc262_capture_source, 11312 .input_mux = &alc262_capture_source,
10970 .unsol_event = alc262_hippo1_unsol_event, 11313 .unsol_event = alc262_hippo_unsol_event,
10971 .init_hook = alc262_hippo1_automute, 11314 .init_hook = alc262_hippo1_init_hook,
10972 }, 11315 },
10973 [ALC262_FUJITSU] = { 11316 [ALC262_FUJITSU] = {
10974 .mixers = { alc262_fujitsu_mixer }, 11317 .mixers = { alc262_fujitsu_mixer },
@@ -11030,7 +11373,7 @@ static struct alc_config_preset alc262_presets[] = {
11030 .num_channel_mode = ARRAY_SIZE(alc262_modes), 11373 .num_channel_mode = ARRAY_SIZE(alc262_modes),
11031 .channel_mode = alc262_modes, 11374 .channel_mode = alc262_modes,
11032 .input_mux = &alc262_capture_source, 11375 .input_mux = &alc262_capture_source,
11033 .unsol_event = alc262_hp_t5735_unsol_event, 11376 .unsol_event = alc_automute_amp_unsol_event,
11034 .init_hook = alc262_hp_t5735_init_hook, 11377 .init_hook = alc262_hp_t5735_init_hook,
11035 }, 11378 },
11036 [ALC262_HP_RP5700] = { 11379 [ALC262_HP_RP5700] = {
@@ -11062,7 +11405,7 @@ static struct alc_config_preset alc262_presets[] = {
11062 .channel_mode = alc262_modes, 11405 .channel_mode = alc262_modes,
11063 .input_mux = &alc262_capture_source, 11406 .input_mux = &alc262_capture_source,
11064 .unsol_event = alc262_hippo_unsol_event, 11407 .unsol_event = alc262_hippo_unsol_event,
11065 .init_hook = alc262_hippo_automute, 11408 .init_hook = alc262_hippo_init_hook,
11066 }, 11409 },
11067 [ALC262_BENQ_T31] = { 11410 [ALC262_BENQ_T31] = {
11068 .mixers = { alc262_benq_t31_mixer }, 11411 .mixers = { alc262_benq_t31_mixer },
@@ -11074,7 +11417,7 @@ static struct alc_config_preset alc262_presets[] = {
11074 .channel_mode = alc262_modes, 11417 .channel_mode = alc262_modes,
11075 .input_mux = &alc262_capture_source, 11418 .input_mux = &alc262_capture_source,
11076 .unsol_event = alc262_hippo_unsol_event, 11419 .unsol_event = alc262_hippo_unsol_event,
11077 .init_hook = alc262_hippo_automute, 11420 .init_hook = alc262_hippo_init_hook,
11078 }, 11421 },
11079 [ALC262_ULTRA] = { 11422 [ALC262_ULTRA] = {
11080 .mixers = { alc262_ultra_mixer }, 11423 .mixers = { alc262_ultra_mixer },
@@ -11139,7 +11482,7 @@ static struct alc_config_preset alc262_presets[] = {
11139 .channel_mode = alc262_modes, 11482 .channel_mode = alc262_modes,
11140 .input_mux = &alc262_capture_source, 11483 .input_mux = &alc262_capture_source,
11141 .unsol_event = alc262_hippo_unsol_event, 11484 .unsol_event = alc262_hippo_unsol_event,
11142 .init_hook = alc262_hippo_automute, 11485 .init_hook = alc262_hippo_init_hook,
11143 }, 11486 },
11144 [ALC262_TYAN] = { 11487 [ALC262_TYAN] = {
11145 .mixers = { alc262_tyan_mixer }, 11488 .mixers = { alc262_tyan_mixer },
@@ -11151,8 +11494,8 @@ static struct alc_config_preset alc262_presets[] = {
11151 .num_channel_mode = ARRAY_SIZE(alc262_modes), 11494 .num_channel_mode = ARRAY_SIZE(alc262_modes),
11152 .channel_mode = alc262_modes, 11495 .channel_mode = alc262_modes,
11153 .input_mux = &alc262_capture_source, 11496 .input_mux = &alc262_capture_source,
11154 .unsol_event = alc262_tyan_unsol_event, 11497 .unsol_event = alc_automute_amp_unsol_event,
11155 .init_hook = alc262_tyan_automute, 11498 .init_hook = alc262_tyan_init_hook,
11156 }, 11499 },
11157}; 11500};
11158 11501
@@ -11187,8 +11530,8 @@ static int patch_alc262(struct hda_codec *codec)
11187 alc262_cfg_tbl); 11530 alc262_cfg_tbl);
11188 11531
11189 if (board_config < 0) { 11532 if (board_config < 0) {
11190 printk(KERN_INFO "hda_codec: Unknown model for ALC262, " 11533 printk(KERN_INFO "hda_codec: Unknown model for %s, "
11191 "trying auto-probe from BIOS...\n"); 11534 "trying auto-probe from BIOS...\n", codec->chip_name);
11192 board_config = ALC262_AUTO; 11535 board_config = ALC262_AUTO;
11193 } 11536 }
11194 11537
@@ -11217,11 +11560,9 @@ static int patch_alc262(struct hda_codec *codec)
11217 if (board_config != ALC262_AUTO) 11560 if (board_config != ALC262_AUTO)
11218 setup_preset(spec, &alc262_presets[board_config]); 11561 setup_preset(spec, &alc262_presets[board_config]);
11219 11562
11220 spec->stream_name_analog = "ALC262 Analog";
11221 spec->stream_analog_playback = &alc262_pcm_analog_playback; 11563 spec->stream_analog_playback = &alc262_pcm_analog_playback;
11222 spec->stream_analog_capture = &alc262_pcm_analog_capture; 11564 spec->stream_analog_capture = &alc262_pcm_analog_capture;
11223 11565
11224 spec->stream_name_digital = "ALC262 Digital";
11225 spec->stream_digital_playback = &alc262_pcm_digital_playback; 11566 spec->stream_digital_playback = &alc262_pcm_digital_playback;
11226 spec->stream_digital_capture = &alc262_pcm_digital_capture; 11567 spec->stream_digital_capture = &alc262_pcm_digital_capture;
11227 11568
@@ -11296,6 +11637,17 @@ static struct snd_kcontrol_new alc268_base_mixer[] = {
11296 { } 11637 { }
11297}; 11638};
11298 11639
11640static struct snd_kcontrol_new alc268_toshiba_mixer[] = {
11641 /* output mixer control */
11642 HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
11643 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
11644 ALC262_HIPPO_MASTER_SWITCH,
11645 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
11646 HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
11647 HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
11648 { }
11649};
11650
11299/* bind Beep switches of both NID 0x0f and 0x10 */ 11651/* bind Beep switches of both NID 0x0f and 0x10 */
11300static struct hda_bind_ctls alc268_bind_beep_sw = { 11652static struct hda_bind_ctls alc268_bind_beep_sw = {
11301 .ops = &snd_hda_bind_sw, 11653 .ops = &snd_hda_bind_sw,
@@ -11319,8 +11671,6 @@ static struct hda_verb alc268_eapd_verbs[] = {
11319}; 11671};
11320 11672
11321/* Toshiba specific */ 11673/* Toshiba specific */
11322#define alc268_toshiba_automute alc262_hippo_automute
11323
11324static struct hda_verb alc268_toshiba_verbs[] = { 11674static struct hda_verb alc268_toshiba_verbs[] = {
11325 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, 11675 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
11326 { } /* end */ 11676 { } /* end */
@@ -11456,13 +11806,8 @@ static struct hda_verb alc268_acer_verbs[] = {
11456}; 11806};
11457 11807
11458/* unsolicited event for HP jack sensing */ 11808/* unsolicited event for HP jack sensing */
11459static void alc268_toshiba_unsol_event(struct hda_codec *codec, 11809#define alc268_toshiba_unsol_event alc262_hippo_unsol_event
11460 unsigned int res) 11810#define alc268_toshiba_init_hook alc262_hippo_init_hook
11461{
11462 if ((res >> 26) != ALC880_HP_EVENT)
11463 return;
11464 alc268_toshiba_automute(codec);
11465}
11466 11811
11467static void alc268_acer_unsol_event(struct hda_codec *codec, 11812static void alc268_acer_unsol_event(struct hda_codec *codec,
11468 unsigned int res) 11813 unsigned int res)
@@ -11537,30 +11882,15 @@ static struct hda_verb alc268_dell_verbs[] = {
11537}; 11882};
11538 11883
11539/* mute/unmute internal speaker according to the hp jack and mute state */ 11884/* mute/unmute internal speaker according to the hp jack and mute state */
11540static void alc268_dell_automute(struct hda_codec *codec) 11885static void alc268_dell_init_hook(struct hda_codec *codec)
11541{ 11886{
11542 unsigned int present; 11887 struct alc_spec *spec = codec->spec;
11543 unsigned int mute;
11544
11545 present = snd_hda_codec_read(codec, 0x15, 0, AC_VERB_GET_PIN_SENSE, 0);
11546 if (present & 0x80000000)
11547 mute = HDA_AMP_MUTE;
11548 else
11549 mute = snd_hda_codec_amp_read(codec, 0x15, 0, HDA_OUTPUT, 0);
11550 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
11551 HDA_AMP_MUTE, mute);
11552}
11553 11888
11554static void alc268_dell_unsol_event(struct hda_codec *codec, 11889 spec->autocfg.hp_pins[0] = 0x15;
11555 unsigned int res) 11890 spec->autocfg.speaker_pins[0] = 0x14;
11556{ 11891 alc_automute_pin(codec);
11557 if ((res >> 26) != ALC880_HP_EVENT)
11558 return;
11559 alc268_dell_automute(codec);
11560} 11892}
11561 11893
11562#define alc268_dell_init_hook alc268_dell_automute
11563
11564static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = { 11894static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = {
11565 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x2, 0x0, HDA_OUTPUT), 11895 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x2, 0x0, HDA_OUTPUT),
11566 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), 11896 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
@@ -11579,16 +11909,6 @@ static struct hda_verb alc267_quanta_il1_verbs[] = {
11579 { } 11909 { }
11580}; 11910};
11581 11911
11582static void alc267_quanta_il1_hp_automute(struct hda_codec *codec)
11583{
11584 unsigned int present;
11585
11586 present = snd_hda_codec_read(codec, 0x15, 0, AC_VERB_GET_PIN_SENSE, 0)
11587 & AC_PINSENSE_PRESENCE;
11588 snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
11589 present ? 0 : PIN_OUT);
11590}
11591
11592static void alc267_quanta_il1_mic_automute(struct hda_codec *codec) 11912static void alc267_quanta_il1_mic_automute(struct hda_codec *codec)
11593{ 11913{
11594 unsigned int present; 11914 unsigned int present;
@@ -11600,9 +11920,13 @@ static void alc267_quanta_il1_mic_automute(struct hda_codec *codec)
11600 present ? 0x00 : 0x01); 11920 present ? 0x00 : 0x01);
11601} 11921}
11602 11922
11603static void alc267_quanta_il1_automute(struct hda_codec *codec) 11923static void alc267_quanta_il1_init_hook(struct hda_codec *codec)
11604{ 11924{
11605 alc267_quanta_il1_hp_automute(codec); 11925 struct alc_spec *spec = codec->spec;
11926
11927 spec->autocfg.hp_pins[0] = 0x15;
11928 spec->autocfg.speaker_pins[0] = 0x14;
11929 alc_automute_pin(codec);
11606 alc267_quanta_il1_mic_automute(codec); 11930 alc267_quanta_il1_mic_automute(codec);
11607} 11931}
11608 11932
@@ -11610,12 +11934,12 @@ static void alc267_quanta_il1_unsol_event(struct hda_codec *codec,
11610 unsigned int res) 11934 unsigned int res)
11611{ 11935{
11612 switch (res >> 26) { 11936 switch (res >> 26) {
11613 case ALC880_HP_EVENT:
11614 alc267_quanta_il1_hp_automute(codec);
11615 break;
11616 case ALC880_MIC_EVENT: 11937 case ALC880_MIC_EVENT:
11617 alc267_quanta_il1_mic_automute(codec); 11938 alc267_quanta_il1_mic_automute(codec);
11618 break; 11939 break;
11940 default:
11941 alc_sku_unsol_event(codec, res);
11942 break;
11619 } 11943 }
11620} 11944}
11621 11945
@@ -12063,16 +12387,16 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
12063 ALC268_ACER_ASPIRE_ONE), 12387 ALC268_ACER_ASPIRE_ONE),
12064 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL), 12388 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
12065 SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL), 12389 SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
12066 SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA), 12390 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP TX25xx series",
12067 SND_PCI_QUIRK(0x103c, 0x30f1, "HP TX25xx series", ALC268_TOSHIBA), 12391 ALC268_TOSHIBA),
12068 SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST), 12392 SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
12069 SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA), 12393 SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
12070 SND_PCI_QUIRK(0x1179, 0xff50, "TOSHIBA A305", ALC268_TOSHIBA), 12394 SND_PCI_QUIRK_MASK(0x1179, 0xff00, 0xff00, "TOSHIBA A/Lx05",
12071 SND_PCI_QUIRK(0x1179, 0xff64, "TOSHIBA L305", ALC268_TOSHIBA), 12395 ALC268_TOSHIBA),
12072 SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA), 12396 SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
12073 SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER), 12397 SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
12074 SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1), 12398 SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
12075 SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO), 12399 SND_PCI_QUIRK(0x1854, 0x1775, "LG R510", ALC268_DELL),
12076 {} 12400 {}
12077}; 12401};
12078 12402
@@ -12090,7 +12414,7 @@ static struct alc_config_preset alc268_presets[] = {
12090 .channel_mode = alc268_modes, 12414 .channel_mode = alc268_modes,
12091 .input_mux = &alc268_capture_source, 12415 .input_mux = &alc268_capture_source,
12092 .unsol_event = alc267_quanta_il1_unsol_event, 12416 .unsol_event = alc267_quanta_il1_unsol_event,
12093 .init_hook = alc267_quanta_il1_automute, 12417 .init_hook = alc267_quanta_il1_init_hook,
12094 }, 12418 },
12095 [ALC268_3ST] = { 12419 [ALC268_3ST] = {
12096 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 12420 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer,
@@ -12108,7 +12432,7 @@ static struct alc_config_preset alc268_presets[] = {
12108 .input_mux = &alc268_capture_source, 12432 .input_mux = &alc268_capture_source,
12109 }, 12433 },
12110 [ALC268_TOSHIBA] = { 12434 [ALC268_TOSHIBA] = {
12111 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 12435 .mixers = { alc268_toshiba_mixer, alc268_capture_alt_mixer,
12112 alc268_beep_mixer }, 12436 alc268_beep_mixer },
12113 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12437 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12114 alc268_toshiba_verbs }, 12438 alc268_toshiba_verbs },
@@ -12122,7 +12446,7 @@ static struct alc_config_preset alc268_presets[] = {
12122 .channel_mode = alc268_modes, 12446 .channel_mode = alc268_modes,
12123 .input_mux = &alc268_capture_source, 12447 .input_mux = &alc268_capture_source,
12124 .unsol_event = alc268_toshiba_unsol_event, 12448 .unsol_event = alc268_toshiba_unsol_event,
12125 .init_hook = alc268_toshiba_automute, 12449 .init_hook = alc268_toshiba_init_hook,
12126 }, 12450 },
12127 [ALC268_ACER] = { 12451 [ALC268_ACER] = {
12128 .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer, 12452 .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer,
@@ -12185,7 +12509,7 @@ static struct alc_config_preset alc268_presets[] = {
12185 .hp_nid = 0x02, 12509 .hp_nid = 0x02,
12186 .num_channel_mode = ARRAY_SIZE(alc268_modes), 12510 .num_channel_mode = ARRAY_SIZE(alc268_modes),
12187 .channel_mode = alc268_modes, 12511 .channel_mode = alc268_modes,
12188 .unsol_event = alc268_dell_unsol_event, 12512 .unsol_event = alc_sku_unsol_event,
12189 .init_hook = alc268_dell_init_hook, 12513 .init_hook = alc268_dell_init_hook,
12190 .input_mux = &alc268_capture_source, 12514 .input_mux = &alc268_capture_source,
12191 }, 12515 },
@@ -12205,7 +12529,7 @@ static struct alc_config_preset alc268_presets[] = {
12205 .channel_mode = alc268_modes, 12529 .channel_mode = alc268_modes,
12206 .input_mux = &alc268_capture_source, 12530 .input_mux = &alc268_capture_source,
12207 .unsol_event = alc268_toshiba_unsol_event, 12531 .unsol_event = alc268_toshiba_unsol_event,
12208 .init_hook = alc268_toshiba_automute 12532 .init_hook = alc268_toshiba_init_hook
12209 }, 12533 },
12210#ifdef CONFIG_SND_DEBUG 12534#ifdef CONFIG_SND_DEBUG
12211 [ALC268_TEST] = { 12535 [ALC268_TEST] = {
@@ -12243,8 +12567,8 @@ static int patch_alc268(struct hda_codec *codec)
12243 alc268_cfg_tbl); 12567 alc268_cfg_tbl);
12244 12568
12245 if (board_config < 0 || board_config >= ALC268_MODEL_LAST) { 12569 if (board_config < 0 || board_config >= ALC268_MODEL_LAST) {
12246 printk(KERN_INFO "hda_codec: Unknown model for ALC268, " 12570 printk(KERN_INFO "hda_codec: Unknown model for %s, "
12247 "trying auto-probe from BIOS...\n"); 12571 "trying auto-probe from BIOS...\n", codec->chip_name);
12248 board_config = ALC268_AUTO; 12572 board_config = ALC268_AUTO;
12249 } 12573 }
12250 12574
@@ -12265,14 +12589,6 @@ static int patch_alc268(struct hda_codec *codec)
12265 if (board_config != ALC268_AUTO) 12589 if (board_config != ALC268_AUTO)
12266 setup_preset(spec, &alc268_presets[board_config]); 12590 setup_preset(spec, &alc268_presets[board_config]);
12267 12591
12268 if (codec->vendor_id == 0x10ec0267) {
12269 spec->stream_name_analog = "ALC267 Analog";
12270 spec->stream_name_digital = "ALC267 Digital";
12271 } else {
12272 spec->stream_name_analog = "ALC268 Analog";
12273 spec->stream_name_digital = "ALC268 Digital";
12274 }
12275
12276 spec->stream_analog_playback = &alc268_pcm_analog_playback; 12592 spec->stream_analog_playback = &alc268_pcm_analog_playback;
12277 spec->stream_analog_capture = &alc268_pcm_analog_capture; 12593 spec->stream_analog_capture = &alc268_pcm_analog_capture;
12278 spec->stream_analog_alt_capture = &alc268_pcm_analog_alt_capture; 12594 spec->stream_analog_alt_capture = &alc268_pcm_analog_alt_capture;
@@ -13099,8 +13415,8 @@ static int patch_alc269(struct hda_codec *codec)
13099 alc269_cfg_tbl); 13415 alc269_cfg_tbl);
13100 13416
13101 if (board_config < 0) { 13417 if (board_config < 0) {
13102 printk(KERN_INFO "hda_codec: Unknown model for ALC269, " 13418 printk(KERN_INFO "hda_codec: Unknown model for %s, "
13103 "trying auto-probe from BIOS...\n"); 13419 "trying auto-probe from BIOS...\n", codec->chip_name);
13104 board_config = ALC269_AUTO; 13420 board_config = ALC269_AUTO;
13105 } 13421 }
13106 13422
@@ -13127,7 +13443,6 @@ static int patch_alc269(struct hda_codec *codec)
13127 if (board_config != ALC269_AUTO) 13443 if (board_config != ALC269_AUTO)
13128 setup_preset(spec, &alc269_presets[board_config]); 13444 setup_preset(spec, &alc269_presets[board_config]);
13129 13445
13130 spec->stream_name_analog = "ALC269 Analog";
13131 if (codec->subsystem_id == 0x17aa3bf8) { 13446 if (codec->subsystem_id == 0x17aa3bf8) {
13132 /* Due to a hardware problem on Lenovo Ideadpad, we need to 13447 /* Due to a hardware problem on Lenovo Ideadpad, we need to
13133 * fix the sample rate of analog I/O to 44.1kHz 13448 * fix the sample rate of analog I/O to 44.1kHz
@@ -13138,7 +13453,6 @@ static int patch_alc269(struct hda_codec *codec)
13138 spec->stream_analog_playback = &alc269_pcm_analog_playback; 13453 spec->stream_analog_playback = &alc269_pcm_analog_playback;
13139 spec->stream_analog_capture = &alc269_pcm_analog_capture; 13454 spec->stream_analog_capture = &alc269_pcm_analog_capture;
13140 } 13455 }
13141 spec->stream_name_digital = "ALC269 Digital";
13142 spec->stream_digital_playback = &alc269_pcm_digital_playback; 13456 spec->stream_digital_playback = &alc269_pcm_digital_playback;
13143 spec->stream_digital_capture = &alc269_pcm_digital_capture; 13457 spec->stream_digital_capture = &alc269_pcm_digital_capture;
13144 13458
@@ -13927,7 +14241,6 @@ static void alc861_auto_init_multi_out(struct hda_codec *codec)
13927 struct alc_spec *spec = codec->spec; 14241 struct alc_spec *spec = codec->spec;
13928 int i; 14242 int i;
13929 14243
13930 alc_subsystem_id(codec, 0x0e, 0x0f, 0x0b);
13931 for (i = 0; i < spec->autocfg.line_outs; i++) { 14244 for (i = 0; i < spec->autocfg.line_outs; i++) {
13932 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 14245 hda_nid_t nid = spec->autocfg.line_out_pins[i];
13933 int pin_type = get_pin_type(spec->autocfg.line_out_type); 14246 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -14010,6 +14323,8 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
14010 spec->num_adc_nids = ARRAY_SIZE(alc861_adc_nids); 14323 spec->num_adc_nids = ARRAY_SIZE(alc861_adc_nids);
14011 set_capture_mixer(spec); 14324 set_capture_mixer(spec);
14012 14325
14326 alc_ssid_check(codec, 0x0e, 0x0f, 0x0b);
14327
14013 return 1; 14328 return 1;
14014} 14329}
14015 14330
@@ -14199,8 +14514,8 @@ static int patch_alc861(struct hda_codec *codec)
14199 alc861_cfg_tbl); 14514 alc861_cfg_tbl);
14200 14515
14201 if (board_config < 0) { 14516 if (board_config < 0) {
14202 printk(KERN_INFO "hda_codec: Unknown model for ALC861, " 14517 printk(KERN_INFO "hda_codec: Unknown model for %s, "
14203 "trying auto-probe from BIOS...\n"); 14518 "trying auto-probe from BIOS...\n", codec->chip_name);
14204 board_config = ALC861_AUTO; 14519 board_config = ALC861_AUTO;
14205 } 14520 }
14206 14521
@@ -14227,11 +14542,9 @@ static int patch_alc861(struct hda_codec *codec)
14227 if (board_config != ALC861_AUTO) 14542 if (board_config != ALC861_AUTO)
14228 setup_preset(spec, &alc861_presets[board_config]); 14543 setup_preset(spec, &alc861_presets[board_config]);
14229 14544
14230 spec->stream_name_analog = "ALC861 Analog";
14231 spec->stream_analog_playback = &alc861_pcm_analog_playback; 14545 spec->stream_analog_playback = &alc861_pcm_analog_playback;
14232 spec->stream_analog_capture = &alc861_pcm_analog_capture; 14546 spec->stream_analog_capture = &alc861_pcm_analog_capture;
14233 14547
14234 spec->stream_name_digital = "ALC861 Digital";
14235 spec->stream_digital_playback = &alc861_pcm_digital_playback; 14548 spec->stream_digital_playback = &alc861_pcm_digital_playback;
14236 spec->stream_digital_capture = &alc861_pcm_digital_capture; 14549 spec->stream_digital_capture = &alc861_pcm_digital_capture;
14237 14550
@@ -14618,19 +14931,6 @@ static struct hda_verb alc861vd_lenovo_unsol_verbs[] = {
14618 {} 14931 {}
14619}; 14932};
14620 14933
14621/* toggle speaker-output according to the hp-jack state */
14622static void alc861vd_lenovo_hp_automute(struct hda_codec *codec)
14623{
14624 unsigned int present;
14625 unsigned char bits;
14626
14627 present = snd_hda_codec_read(codec, 0x1b, 0,
14628 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
14629 bits = present ? HDA_AMP_MUTE : 0;
14630 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
14631 HDA_AMP_MUTE, bits);
14632}
14633
14634static void alc861vd_lenovo_mic_automute(struct hda_codec *codec) 14934static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
14635{ 14935{
14636 unsigned int present; 14936 unsigned int present;
@@ -14643,9 +14943,13 @@ static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
14643 HDA_AMP_MUTE, bits); 14943 HDA_AMP_MUTE, bits);
14644} 14944}
14645 14945
14646static void alc861vd_lenovo_automute(struct hda_codec *codec) 14946static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
14647{ 14947{
14648 alc861vd_lenovo_hp_automute(codec); 14948 struct alc_spec *spec = codec->spec;
14949
14950 spec->autocfg.hp_pins[0] = 0x1b;
14951 spec->autocfg.speaker_pins[0] = 0x14;
14952 alc_automute_amp(codec);
14649 alc861vd_lenovo_mic_automute(codec); 14953 alc861vd_lenovo_mic_automute(codec);
14650} 14954}
14651 14955
@@ -14653,12 +14957,12 @@ static void alc861vd_lenovo_unsol_event(struct hda_codec *codec,
14653 unsigned int res) 14957 unsigned int res)
14654{ 14958{
14655 switch (res >> 26) { 14959 switch (res >> 26) {
14656 case ALC880_HP_EVENT:
14657 alc861vd_lenovo_hp_automute(codec);
14658 break;
14659 case ALC880_MIC_EVENT: 14960 case ALC880_MIC_EVENT:
14660 alc861vd_lenovo_mic_automute(codec); 14961 alc861vd_lenovo_mic_automute(codec);
14661 break; 14962 break;
14963 default:
14964 alc_automute_amp_unsol_event(codec, res);
14965 break;
14662 } 14966 }
14663} 14967}
14664 14968
@@ -14708,20 +15012,13 @@ static struct hda_verb alc861vd_dallas_verbs[] = {
14708}; 15012};
14709 15013
14710/* toggle speaker-output according to the hp-jack state */ 15014/* toggle speaker-output according to the hp-jack state */
14711static void alc861vd_dallas_automute(struct hda_codec *codec) 15015static void alc861vd_dallas_init_hook(struct hda_codec *codec)
14712{ 15016{
14713 unsigned int present; 15017 struct alc_spec *spec = codec->spec;
14714
14715 present = snd_hda_codec_read(codec, 0x15, 0,
14716 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
14717 snd_hda_codec_amp_stereo(codec, 0x14, HDA_OUTPUT, 0,
14718 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
14719}
14720 15018
14721static void alc861vd_dallas_unsol_event(struct hda_codec *codec, unsigned int res) 15019 spec->autocfg.hp_pins[0] = 0x15;
14722{ 15020 spec->autocfg.speaker_pins[0] = 0x14;
14723 if ((res >> 26) == ALC880_HP_EVENT) 15021 alc_automute_amp(codec);
14724 alc861vd_dallas_automute(codec);
14725} 15022}
14726 15023
14727#ifdef CONFIG_SND_HDA_POWER_SAVE 15024#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -14835,7 +15132,7 @@ static struct alc_config_preset alc861vd_presets[] = {
14835 .channel_mode = alc861vd_3stack_2ch_modes, 15132 .channel_mode = alc861vd_3stack_2ch_modes,
14836 .input_mux = &alc861vd_capture_source, 15133 .input_mux = &alc861vd_capture_source,
14837 .unsol_event = alc861vd_lenovo_unsol_event, 15134 .unsol_event = alc861vd_lenovo_unsol_event,
14838 .init_hook = alc861vd_lenovo_automute, 15135 .init_hook = alc861vd_lenovo_init_hook,
14839 }, 15136 },
14840 [ALC861VD_DALLAS] = { 15137 [ALC861VD_DALLAS] = {
14841 .mixers = { alc861vd_dallas_mixer }, 15138 .mixers = { alc861vd_dallas_mixer },
@@ -14845,8 +15142,8 @@ static struct alc_config_preset alc861vd_presets[] = {
14845 .num_channel_mode = ARRAY_SIZE(alc861vd_3stack_2ch_modes), 15142 .num_channel_mode = ARRAY_SIZE(alc861vd_3stack_2ch_modes),
14846 .channel_mode = alc861vd_3stack_2ch_modes, 15143 .channel_mode = alc861vd_3stack_2ch_modes,
14847 .input_mux = &alc861vd_dallas_capture_source, 15144 .input_mux = &alc861vd_dallas_capture_source,
14848 .unsol_event = alc861vd_dallas_unsol_event, 15145 .unsol_event = alc_automute_amp_unsol_event,
14849 .init_hook = alc861vd_dallas_automute, 15146 .init_hook = alc861vd_dallas_init_hook,
14850 }, 15147 },
14851 [ALC861VD_HP] = { 15148 [ALC861VD_HP] = {
14852 .mixers = { alc861vd_hp_mixer }, 15149 .mixers = { alc861vd_hp_mixer },
@@ -14857,8 +15154,8 @@ static struct alc_config_preset alc861vd_presets[] = {
14857 .num_channel_mode = ARRAY_SIZE(alc861vd_3stack_2ch_modes), 15154 .num_channel_mode = ARRAY_SIZE(alc861vd_3stack_2ch_modes),
14858 .channel_mode = alc861vd_3stack_2ch_modes, 15155 .channel_mode = alc861vd_3stack_2ch_modes,
14859 .input_mux = &alc861vd_hp_capture_source, 15156 .input_mux = &alc861vd_hp_capture_source,
14860 .unsol_event = alc861vd_dallas_unsol_event, 15157 .unsol_event = alc_automute_amp_unsol_event,
14861 .init_hook = alc861vd_dallas_automute, 15158 .init_hook = alc861vd_dallas_init_hook,
14862 }, 15159 },
14863 [ALC660VD_ASUS_V1S] = { 15160 [ALC660VD_ASUS_V1S] = {
14864 .mixers = { alc861vd_lenovo_mixer }, 15161 .mixers = { alc861vd_lenovo_mixer },
@@ -14873,7 +15170,7 @@ static struct alc_config_preset alc861vd_presets[] = {
14873 .channel_mode = alc861vd_3stack_2ch_modes, 15170 .channel_mode = alc861vd_3stack_2ch_modes,
14874 .input_mux = &alc861vd_capture_source, 15171 .input_mux = &alc861vd_capture_source,
14875 .unsol_event = alc861vd_lenovo_unsol_event, 15172 .unsol_event = alc861vd_lenovo_unsol_event,
14876 .init_hook = alc861vd_lenovo_automute, 15173 .init_hook = alc861vd_lenovo_init_hook,
14877 }, 15174 },
14878}; 15175};
14879 15176
@@ -14891,7 +15188,6 @@ static void alc861vd_auto_init_multi_out(struct hda_codec *codec)
14891 struct alc_spec *spec = codec->spec; 15188 struct alc_spec *spec = codec->spec;
14892 int i; 15189 int i;
14893 15190
14894 alc_subsystem_id(codec, 0x15, 0x1b, 0x14);
14895 for (i = 0; i <= HDA_SIDE; i++) { 15191 for (i = 0; i <= HDA_SIDE; i++) {
14896 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 15192 hda_nid_t nid = spec->autocfg.line_out_pins[i];
14897 int pin_type = get_pin_type(spec->autocfg.line_out_type); 15193 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -15109,6 +15405,8 @@ static int alc861vd_parse_auto_config(struct hda_codec *codec)
15109 if (err < 0) 15405 if (err < 0)
15110 return err; 15406 return err;
15111 15407
15408 alc_ssid_check(codec, 0x15, 0x1b, 0x14);
15409
15112 return 1; 15410 return 1;
15113} 15411}
15114 15412
@@ -15140,8 +15438,8 @@ static int patch_alc861vd(struct hda_codec *codec)
15140 alc861vd_cfg_tbl); 15438 alc861vd_cfg_tbl);
15141 15439
15142 if (board_config < 0 || board_config >= ALC861VD_MODEL_LAST) { 15440 if (board_config < 0 || board_config >= ALC861VD_MODEL_LAST) {
15143 printk(KERN_INFO "hda_codec: Unknown model for ALC660VD/" 15441 printk(KERN_INFO "hda_codec: Unknown model for %s, "
15144 "ALC861VD, trying auto-probe from BIOS...\n"); 15442 "trying auto-probe from BIOS...\n", codec->chip_name);
15145 board_config = ALC861VD_AUTO; 15443 board_config = ALC861VD_AUTO;
15146 } 15444 }
15147 15445
@@ -15169,13 +15467,8 @@ static int patch_alc861vd(struct hda_codec *codec)
15169 setup_preset(spec, &alc861vd_presets[board_config]); 15467 setup_preset(spec, &alc861vd_presets[board_config]);
15170 15468
15171 if (codec->vendor_id == 0x10ec0660) { 15469 if (codec->vendor_id == 0x10ec0660) {
15172 spec->stream_name_analog = "ALC660-VD Analog";
15173 spec->stream_name_digital = "ALC660-VD Digital";
15174 /* always turn on EAPD */ 15470 /* always turn on EAPD */
15175 add_verb(spec, alc660vd_eapd_verbs); 15471 add_verb(spec, alc660vd_eapd_verbs);
15176 } else {
15177 spec->stream_name_analog = "ALC861VD Analog";
15178 spec->stream_name_digital = "ALC861VD Digital";
15179 } 15472 }
15180 15473
15181 spec->stream_analog_playback = &alc861vd_pcm_analog_playback; 15474 spec->stream_analog_playback = &alc861vd_pcm_analog_playback;
@@ -15289,6 +15582,38 @@ static struct hda_input_mux alc663_m51va_capture_source = {
15289 }, 15582 },
15290}; 15583};
15291 15584
15585#if 1 /* set to 0 for testing other input sources below */
15586static struct hda_input_mux alc272_nc10_capture_source = {
15587 .num_items = 2,
15588 .items = {
15589 { "Autoselect Mic", 0x0 },
15590 { "Internal Mic", 0x1 },
15591 },
15592};
15593#else
15594static struct hda_input_mux alc272_nc10_capture_source = {
15595 .num_items = 16,
15596 .items = {
15597 { "Autoselect Mic", 0x0 },
15598 { "Internal Mic", 0x1 },
15599 { "In-0x02", 0x2 },
15600 { "In-0x03", 0x3 },
15601 { "In-0x04", 0x4 },
15602 { "In-0x05", 0x5 },
15603 { "In-0x06", 0x6 },
15604 { "In-0x07", 0x7 },
15605 { "In-0x08", 0x8 },
15606 { "In-0x09", 0x9 },
15607 { "In-0x0a", 0x0a },
15608 { "In-0x0b", 0x0b },
15609 { "In-0x0c", 0x0c },
15610 { "In-0x0d", 0x0d },
15611 { "In-0x0e", 0x0e },
15612 { "In-0x0f", 0x0f },
15613 },
15614};
15615#endif
15616
15292/* 15617/*
15293 * 2ch mode 15618 * 2ch mode
15294 */ 15619 */
@@ -15428,10 +15753,8 @@ static struct snd_kcontrol_new alc662_lenovo_101e_mixer[] = {
15428}; 15753};
15429 15754
15430static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = { 15755static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = {
15431 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), 15756 HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
15432 15757 ALC262_HIPPO_MASTER_SWITCH,
15433 HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT),
15434 HDA_CODEC_MUTE("Line-Out Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
15435 15758
15436 HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT), 15759 HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT),
15437 HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), 15760 HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
@@ -15444,15 +15767,11 @@ static struct snd_kcontrol_new alc662_eeepc_p701_mixer[] = {
15444}; 15767};
15445 15768
15446static struct snd_kcontrol_new alc662_eeepc_ep20_mixer[] = { 15769static struct snd_kcontrol_new alc662_eeepc_ep20_mixer[] = {
15447 HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x02, 0x0, HDA_OUTPUT), 15770 ALC262_HIPPO_MASTER_SWITCH,
15448 HDA_CODEC_MUTE("Line-Out Playback Switch", 0x14, 0x0, HDA_OUTPUT), 15771 HDA_CODEC_VOLUME("Front Playback Volume", 0x02, 0x0, HDA_OUTPUT),
15449 HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT), 15772 HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT),
15450 HDA_BIND_MUTE("Surround Playback Switch", 0x03, 2, HDA_INPUT),
15451 HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x04, 1, 0x0, HDA_OUTPUT), 15773 HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x04, 1, 0x0, HDA_OUTPUT),
15452 HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x04, 2, 0x0, HDA_OUTPUT), 15774 HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x04, 2, 0x0, HDA_OUTPUT),
15453 HDA_BIND_MUTE_MONO("Center Playback Switch", 0x04, 1, 2, HDA_INPUT),
15454 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x04, 2, 2, HDA_INPUT),
15455 HDA_CODEC_MUTE("Speaker Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
15456 HDA_BIND_MUTE("MuteCtrl Playback Switch", 0x0c, 2, HDA_INPUT), 15775 HDA_BIND_MUTE("MuteCtrl Playback Switch", 0x0c, 2, HDA_INPUT),
15457 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), 15776 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
15458 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), 15777 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
@@ -15960,51 +16279,25 @@ static void alc662_eeepc_mic_automute(struct hda_codec *codec)
15960static void alc662_eeepc_unsol_event(struct hda_codec *codec, 16279static void alc662_eeepc_unsol_event(struct hda_codec *codec,
15961 unsigned int res) 16280 unsigned int res)
15962{ 16281{
15963 if ((res >> 26) == ALC880_HP_EVENT)
15964 alc262_hippo1_automute( codec );
15965
15966 if ((res >> 26) == ALC880_MIC_EVENT) 16282 if ((res >> 26) == ALC880_MIC_EVENT)
15967 alc662_eeepc_mic_automute(codec); 16283 alc662_eeepc_mic_automute(codec);
16284 else
16285 alc262_hippo_unsol_event(codec, res);
15968} 16286}
15969 16287
15970static void alc662_eeepc_inithook(struct hda_codec *codec) 16288static void alc662_eeepc_inithook(struct hda_codec *codec)
15971{ 16289{
15972 alc262_hippo1_automute( codec ); 16290 alc262_hippo1_init_hook(codec);
15973 alc662_eeepc_mic_automute(codec); 16291 alc662_eeepc_mic_automute(codec);
15974} 16292}
15975 16293
15976static void alc662_eeepc_ep20_automute(struct hda_codec *codec)
15977{
15978 unsigned int mute;
15979 unsigned int present;
15980
15981 snd_hda_codec_read(codec, 0x14, 0, AC_VERB_SET_PIN_SENSE, 0);
15982 present = snd_hda_codec_read(codec, 0x14, 0,
15983 AC_VERB_GET_PIN_SENSE, 0);
15984 present = (present & 0x80000000) != 0;
15985 if (present) {
15986 /* mute internal speaker */
15987 snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
15988 HDA_AMP_MUTE, HDA_AMP_MUTE);
15989 } else {
15990 /* unmute internal speaker if necessary */
15991 mute = snd_hda_codec_amp_read(codec, 0x14, 0, HDA_OUTPUT, 0);
15992 snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
15993 HDA_AMP_MUTE, mute);
15994 }
15995}
15996
15997/* unsolicited event for HP jack sensing */
15998static void alc662_eeepc_ep20_unsol_event(struct hda_codec *codec,
15999 unsigned int res)
16000{
16001 if ((res >> 26) == ALC880_HP_EVENT)
16002 alc662_eeepc_ep20_automute(codec);
16003}
16004
16005static void alc662_eeepc_ep20_inithook(struct hda_codec *codec) 16294static void alc662_eeepc_ep20_inithook(struct hda_codec *codec)
16006{ 16295{
16007 alc662_eeepc_ep20_automute(codec); 16296 struct alc_spec *spec = codec->spec;
16297
16298 spec->autocfg.hp_pins[0] = 0x14;
16299 spec->autocfg.speaker_pins[0] = 0x1b;
16300 alc262_hippo_master_update(codec);
16008} 16301}
16009 16302
16010static void alc663_m51va_speaker_automute(struct hda_codec *codec) 16303static void alc663_m51va_speaker_automute(struct hda_codec *codec)
@@ -16338,35 +16631,9 @@ static void alc663_g50v_inithook(struct hda_codec *codec)
16338 alc662_eeepc_mic_automute(codec); 16631 alc662_eeepc_mic_automute(codec);
16339} 16632}
16340 16633
16341/* bind hp and internal speaker mute (with plug check) */
16342static int alc662_ecs_master_sw_put(struct snd_kcontrol *kcontrol,
16343 struct snd_ctl_elem_value *ucontrol)
16344{
16345 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
16346 long *valp = ucontrol->value.integer.value;
16347 int change;
16348
16349 change = snd_hda_codec_amp_update(codec, 0x1b, 0, HDA_OUTPUT, 0,
16350 HDA_AMP_MUTE,
16351 valp[0] ? 0 : HDA_AMP_MUTE);
16352 change |= snd_hda_codec_amp_update(codec, 0x1b, 1, HDA_OUTPUT, 0,
16353 HDA_AMP_MUTE,
16354 valp[1] ? 0 : HDA_AMP_MUTE);
16355 if (change)
16356 alc262_hippo1_automute(codec);
16357 return change;
16358}
16359
16360static struct snd_kcontrol_new alc662_ecs_mixer[] = { 16634static struct snd_kcontrol_new alc662_ecs_mixer[] = {
16361 HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT), 16635 HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
16362 { 16636 ALC262_HIPPO_MASTER_SWITCH,
16363 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
16364 .name = "Master Playback Switch",
16365 .info = snd_hda_mixer_amp_switch_info,
16366 .get = snd_hda_mixer_amp_switch_get,
16367 .put = alc662_ecs_master_sw_put,
16368 .private_value = HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
16369 },
16370 16637
16371 HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT), 16638 HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT),
16372 HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT), 16639 HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
@@ -16378,6 +16645,23 @@ static struct snd_kcontrol_new alc662_ecs_mixer[] = {
16378 { } /* end */ 16645 { } /* end */
16379}; 16646};
16380 16647
16648static struct snd_kcontrol_new alc272_nc10_mixer[] = {
16649 /* Master Playback automatically created from Speaker and Headphone */
16650 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT),
16651 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
16652 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
16653 HDA_CODEC_MUTE("Headphone Playback Switch", 0x21, 0x0, HDA_OUTPUT),
16654
16655 HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
16656 HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
16657 HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
16658
16659 HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
16660 HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
16661 HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
16662 { } /* end */
16663};
16664
16381#ifdef CONFIG_SND_HDA_POWER_SAVE 16665#ifdef CONFIG_SND_HDA_POWER_SAVE
16382#define alc662_loopbacks alc880_loopbacks 16666#define alc662_loopbacks alc880_loopbacks
16383#endif 16667#endif
@@ -16411,6 +16695,9 @@ static const char *alc662_models[ALC662_MODEL_LAST] = {
16411 [ALC663_ASUS_MODE4] = "asus-mode4", 16695 [ALC663_ASUS_MODE4] = "asus-mode4",
16412 [ALC663_ASUS_MODE5] = "asus-mode5", 16696 [ALC663_ASUS_MODE5] = "asus-mode5",
16413 [ALC663_ASUS_MODE6] = "asus-mode6", 16697 [ALC663_ASUS_MODE6] = "asus-mode6",
16698 [ALC272_DELL] = "dell",
16699 [ALC272_DELL_ZM1] = "dell-zm1",
16700 [ALC272_SAMSUNG_NC10] = "samsung-nc10",
16414 [ALC662_AUTO] = "auto", 16701 [ALC662_AUTO] = "auto",
16415}; 16702};
16416 16703
@@ -16468,6 +16755,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
16468 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_ECS), 16755 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_ECS),
16469 SND_PCI_QUIRK(0x105b, 0x0d47, "Foxconn 45CMX/45GMX/45CMX-K", 16756 SND_PCI_QUIRK(0x105b, 0x0d47, "Foxconn 45CMX/45GMX/45CMX-K",
16470 ALC662_3ST_6ch_DIG), 16757 ALC662_3ST_6ch_DIG),
16758 SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10),
16471 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", 16759 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L",
16472 ALC662_3ST_6ch_DIG), 16760 ALC662_3ST_6ch_DIG),
16473 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), 16761 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG),
@@ -16558,7 +16846,7 @@ static struct alc_config_preset alc662_presets[] = {
16558 .num_channel_mode = ARRAY_SIZE(alc662_3ST_6ch_modes), 16846 .num_channel_mode = ARRAY_SIZE(alc662_3ST_6ch_modes),
16559 .channel_mode = alc662_3ST_6ch_modes, 16847 .channel_mode = alc662_3ST_6ch_modes,
16560 .input_mux = &alc662_lenovo_101e_capture_source, 16848 .input_mux = &alc662_lenovo_101e_capture_source,
16561 .unsol_event = alc662_eeepc_ep20_unsol_event, 16849 .unsol_event = alc662_eeepc_unsol_event,
16562 .init_hook = alc662_eeepc_ep20_inithook, 16850 .init_hook = alc662_eeepc_ep20_inithook,
16563 }, 16851 },
16564 [ALC662_ECS] = { 16852 [ALC662_ECS] = {
@@ -16739,6 +17027,18 @@ static struct alc_config_preset alc662_presets[] = {
16739 .unsol_event = alc663_m51va_unsol_event, 17027 .unsol_event = alc663_m51va_unsol_event,
16740 .init_hook = alc663_m51va_inithook, 17028 .init_hook = alc663_m51va_inithook,
16741 }, 17029 },
17030 [ALC272_SAMSUNG_NC10] = {
17031 .mixers = { alc272_nc10_mixer },
17032 .init_verbs = { alc662_init_verbs,
17033 alc663_21jd_amic_init_verbs },
17034 .num_dacs = ARRAY_SIZE(alc272_dac_nids),
17035 .dac_nids = alc272_dac_nids,
17036 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17037 .channel_mode = alc662_3ST_2ch_modes,
17038 .input_mux = &alc272_nc10_capture_source,
17039 .unsol_event = alc663_mode4_unsol_event,
17040 .init_hook = alc663_mode4_inithook,
17041 },
16742}; 17042};
16743 17043
16744 17044
@@ -16933,7 +17233,6 @@ static void alc662_auto_init_multi_out(struct hda_codec *codec)
16933 struct alc_spec *spec = codec->spec; 17233 struct alc_spec *spec = codec->spec;
16934 int i; 17234 int i;
16935 17235
16936 alc_subsystem_id(codec, 0x15, 0x1b, 0x14);
16937 for (i = 0; i <= HDA_SIDE; i++) { 17236 for (i = 0; i <= HDA_SIDE; i++) {
16938 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 17237 hda_nid_t nid = spec->autocfg.line_out_pins[i];
16939 int pin_type = get_pin_type(spec->autocfg.line_out_type); 17238 int pin_type = get_pin_type(spec->autocfg.line_out_type);
@@ -17030,6 +17329,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
17030 if (err < 0) 17329 if (err < 0)
17031 return err; 17330 return err;
17032 17331
17332 alc_ssid_check(codec, 0x15, 0x1b, 0x14);
17333
17033 return 1; 17334 return 1;
17034} 17335}
17035 17336
@@ -17062,8 +17363,8 @@ static int patch_alc662(struct hda_codec *codec)
17062 alc662_models, 17363 alc662_models,
17063 alc662_cfg_tbl); 17364 alc662_cfg_tbl);
17064 if (board_config < 0) { 17365 if (board_config < 0) {
17065 printk(KERN_INFO "hda_codec: Unknown model for ALC662, " 17366 printk(KERN_INFO "hda_codec: Unknown model for %s, "
17066 "trying auto-probe from BIOS...\n"); 17367 "trying auto-probe from BIOS...\n", codec->chip_name);
17067 board_config = ALC662_AUTO; 17368 board_config = ALC662_AUTO;
17068 } 17369 }
17069 17370
@@ -17090,17 +17391,6 @@ static int patch_alc662(struct hda_codec *codec)
17090 if (board_config != ALC662_AUTO) 17391 if (board_config != ALC662_AUTO)
17091 setup_preset(spec, &alc662_presets[board_config]); 17392 setup_preset(spec, &alc662_presets[board_config]);
17092 17393
17093 if (codec->vendor_id == 0x10ec0663) {
17094 spec->stream_name_analog = "ALC663 Analog";
17095 spec->stream_name_digital = "ALC663 Digital";
17096 } else if (codec->vendor_id == 0x10ec0272) {
17097 spec->stream_name_analog = "ALC272 Analog";
17098 spec->stream_name_digital = "ALC272 Digital";
17099 } else {
17100 spec->stream_name_analog = "ALC662 Analog";
17101 spec->stream_name_digital = "ALC662 Digital";
17102 }
17103
17104 spec->stream_analog_playback = &alc662_pcm_analog_playback; 17394 spec->stream_analog_playback = &alc662_pcm_analog_playback;
17105 spec->stream_analog_capture = &alc662_pcm_analog_capture; 17395 spec->stream_analog_capture = &alc662_pcm_analog_capture;
17106 17396
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d2fd8ef6aef8..93e47c96a38b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -100,6 +100,7 @@ enum {
100 STAC_HP_M4, 100 STAC_HP_M4,
101 STAC_HP_DV5, 101 STAC_HP_DV5,
102 STAC_HP_HDX, 102 STAC_HP_HDX,
103 STAC_HP_DV4_1222NR,
103 STAC_92HD71BXX_MODELS 104 STAC_92HD71BXX_MODELS
104}; 105};
105 106
@@ -193,6 +194,7 @@ struct sigmatel_spec {
193 unsigned int gpio_dir; 194 unsigned int gpio_dir;
194 unsigned int gpio_data; 195 unsigned int gpio_data;
195 unsigned int gpio_mute; 196 unsigned int gpio_mute;
197 unsigned int gpio_led;
196 198
197 /* stream */ 199 /* stream */
198 unsigned int stream_delay; 200 unsigned int stream_delay;
@@ -634,6 +636,40 @@ static int stac92xx_smux_enum_put(struct snd_kcontrol *kcontrol,
634 return 0; 636 return 0;
635} 637}
636 638
639static unsigned int stac92xx_vref_set(struct hda_codec *codec,
640 hda_nid_t nid, unsigned int new_vref)
641{
642 unsigned int error;
643 unsigned int pincfg;
644 pincfg = snd_hda_codec_read(codec, nid, 0,
645 AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
646
647 pincfg &= 0xff;
648 pincfg &= ~(AC_PINCTL_VREFEN | AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN);
649 pincfg |= new_vref;
650
651 if (new_vref == AC_PINCTL_VREF_HIZ)
652 pincfg |= AC_PINCTL_OUT_EN;
653 else
654 pincfg |= AC_PINCTL_IN_EN;
655
656 error = snd_hda_codec_write_cache(codec, nid, 0,
657 AC_VERB_SET_PIN_WIDGET_CONTROL, pincfg);
658 if (error < 0)
659 return error;
660 else
661 return 1;
662}
663
664static unsigned int stac92xx_vref_get(struct hda_codec *codec, hda_nid_t nid)
665{
666 unsigned int vref;
667 vref = snd_hda_codec_read(codec, nid, 0,
668 AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
669 vref &= AC_PINCTL_VREFEN;
670 return vref;
671}
672
637static int stac92xx_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) 673static int stac92xx_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
638{ 674{
639 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 675 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
@@ -995,6 +1031,17 @@ static struct hda_verb stac9205_core_init[] = {
995 .private_value = verb_read | (verb_write << 16), \ 1031 .private_value = verb_read | (verb_write << 16), \
996 } 1032 }
997 1033
1034#define DC_BIAS(xname, idx, nid) \
1035 { \
1036 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1037 .name = xname, \
1038 .index = idx, \
1039 .info = stac92xx_dc_bias_info, \
1040 .get = stac92xx_dc_bias_get, \
1041 .put = stac92xx_dc_bias_put, \
1042 .private_value = nid, \
1043 }
1044
998static struct snd_kcontrol_new stac9200_mixer[] = { 1045static struct snd_kcontrol_new stac9200_mixer[] = {
999 HDA_CODEC_VOLUME("Master Playback Volume", 0xb, 0, HDA_OUTPUT), 1046 HDA_CODEC_VOLUME("Master Playback Volume", 0xb, 0, HDA_OUTPUT),
1000 HDA_CODEC_MUTE("Master Playback Switch", 0xb, 0, HDA_OUTPUT), 1047 HDA_CODEC_MUTE("Master Playback Switch", 0xb, 0, HDA_OUTPUT),
@@ -1543,6 +1590,8 @@ static struct snd_pci_quirk stac9200_cfg_tbl[] = {
1543 /* SigmaTel reference board */ 1590 /* SigmaTel reference board */
1544 SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, 1591 SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
1545 "DFI LanParty", STAC_REF), 1592 "DFI LanParty", STAC_REF),
1593 SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xfb30,
1594 "SigmaTel",STAC_9205_REF),
1546 SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, 1595 SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
1547 "DFI LanParty", STAC_REF), 1596 "DFI LanParty", STAC_REF),
1548 /* Dell laptops have BIOS problem */ 1597 /* Dell laptops have BIOS problem */
@@ -1837,6 +1886,7 @@ static unsigned int *stac92hd71bxx_brd_tbl[STAC_92HD71BXX_MODELS] = {
1837 [STAC_HP_M4] = NULL, 1886 [STAC_HP_M4] = NULL,
1838 [STAC_HP_DV5] = NULL, 1887 [STAC_HP_DV5] = NULL,
1839 [STAC_HP_HDX] = NULL, 1888 [STAC_HP_HDX] = NULL,
1889 [STAC_HP_DV4_1222NR] = NULL,
1840}; 1890};
1841 1891
1842static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = { 1892static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
@@ -1848,6 +1898,7 @@ static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
1848 [STAC_HP_M4] = "hp-m4", 1898 [STAC_HP_M4] = "hp-m4",
1849 [STAC_HP_DV5] = "hp-dv5", 1899 [STAC_HP_DV5] = "hp-dv5",
1850 [STAC_HP_HDX] = "hp-hdx", 1900 [STAC_HP_HDX] = "hp-hdx",
1901 [STAC_HP_DV4_1222NR] = "hp-dv4-1222nr",
1851}; 1902};
1852 1903
1853static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = { 1904static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
@@ -1856,6 +1907,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
1856 "DFI LanParty", STAC_92HD71BXX_REF), 1907 "DFI LanParty", STAC_92HD71BXX_REF),
1857 SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, 1908 SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
1858 "DFI LanParty", STAC_92HD71BXX_REF), 1909 "DFI LanParty", STAC_92HD71BXX_REF),
1910 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fb,
1911 "HP dv4-1222nr", STAC_HP_DV4_1222NR),
1859 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080, 1912 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3080,
1860 "HP", STAC_HP_DV5), 1913 "HP", STAC_HP_DV5),
1861 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0, 1914 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x30f0,
@@ -2545,7 +2598,8 @@ static int stac92xx_build_pcms(struct hda_codec *codec)
2545 return 0; 2598 return 0;
2546} 2599}
2547 2600
2548static unsigned int stac92xx_get_vref(struct hda_codec *codec, hda_nid_t nid) 2601static unsigned int stac92xx_get_default_vref(struct hda_codec *codec,
2602 hda_nid_t nid)
2549{ 2603{
2550 unsigned int pincap = snd_hda_query_pin_caps(codec, nid); 2604 unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
2551 pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT; 2605 pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT;
@@ -2599,15 +2653,108 @@ static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
2599 return 1; 2653 return 1;
2600} 2654}
2601 2655
2602#define stac92xx_io_switch_info snd_ctl_boolean_mono_info 2656static int stac92xx_dc_bias_info(struct snd_kcontrol *kcontrol,
2657 struct snd_ctl_elem_info *uinfo)
2658{
2659 int i;
2660 static char *texts[] = {
2661 "Mic In", "Line In", "Line Out"
2662 };
2663
2664 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2665 struct sigmatel_spec *spec = codec->spec;
2666 hda_nid_t nid = kcontrol->private_value;
2667
2668 if (nid == spec->mic_switch || nid == spec->line_switch)
2669 i = 3;
2670 else
2671 i = 2;
2672
2673 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
2674 uinfo->value.enumerated.items = i;
2675 uinfo->count = 1;
2676 if (uinfo->value.enumerated.item >= i)
2677 uinfo->value.enumerated.item = i-1;
2678 strcpy(uinfo->value.enumerated.name,
2679 texts[uinfo->value.enumerated.item]);
2680
2681 return 0;
2682}
2683
2684static int stac92xx_dc_bias_get(struct snd_kcontrol *kcontrol,
2685 struct snd_ctl_elem_value *ucontrol)
2686{
2687 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2688 hda_nid_t nid = kcontrol->private_value;
2689 unsigned int vref = stac92xx_vref_get(codec, nid);
2690
2691 if (vref == stac92xx_get_default_vref(codec, nid))
2692 ucontrol->value.enumerated.item[0] = 0;
2693 else if (vref == AC_PINCTL_VREF_GRD)
2694 ucontrol->value.enumerated.item[0] = 1;
2695 else if (vref == AC_PINCTL_VREF_HIZ)
2696 ucontrol->value.enumerated.item[0] = 2;
2697
2698 return 0;
2699}
2700
2701static int stac92xx_dc_bias_put(struct snd_kcontrol *kcontrol,
2702 struct snd_ctl_elem_value *ucontrol)
2703{
2704 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2705 unsigned int new_vref = 0;
2706 unsigned int error;
2707 hda_nid_t nid = kcontrol->private_value;
2708
2709 if (ucontrol->value.enumerated.item[0] == 0)
2710 new_vref = stac92xx_get_default_vref(codec, nid);
2711 else if (ucontrol->value.enumerated.item[0] == 1)
2712 new_vref = AC_PINCTL_VREF_GRD;
2713 else if (ucontrol->value.enumerated.item[0] == 2)
2714 new_vref = AC_PINCTL_VREF_HIZ;
2715 else
2716 return 0;
2717
2718 if (new_vref != stac92xx_vref_get(codec, nid)) {
2719 error = stac92xx_vref_set(codec, nid, new_vref);
2720 return error;
2721 }
2722
2723 return 0;
2724}
2725
2726static int stac92xx_io_switch_info(struct snd_kcontrol *kcontrol,
2727 struct snd_ctl_elem_info *uinfo)
2728{
2729 static char *texts[2];
2730 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2731 struct sigmatel_spec *spec = codec->spec;
2732
2733 if (kcontrol->private_value == spec->line_switch)
2734 texts[0] = "Line In";
2735 else
2736 texts[0] = "Mic In";
2737 texts[1] = "Line Out";
2738 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
2739 uinfo->value.enumerated.items = 2;
2740 uinfo->count = 1;
2741
2742 if (uinfo->value.enumerated.item >= 2)
2743 uinfo->value.enumerated.item = 1;
2744 strcpy(uinfo->value.enumerated.name,
2745 texts[uinfo->value.enumerated.item]);
2746
2747 return 0;
2748}
2603 2749
2604static int stac92xx_io_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) 2750static int stac92xx_io_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
2605{ 2751{
2606 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 2752 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2607 struct sigmatel_spec *spec = codec->spec; 2753 struct sigmatel_spec *spec = codec->spec;
2608 int io_idx = kcontrol-> private_value & 0xff; 2754 hda_nid_t nid = kcontrol->private_value;
2755 int io_idx = (nid == spec->mic_switch) ? 1 : 0;
2609 2756
2610 ucontrol->value.integer.value[0] = spec->io_switch[io_idx]; 2757 ucontrol->value.enumerated.item[0] = spec->io_switch[io_idx];
2611 return 0; 2758 return 0;
2612} 2759}
2613 2760
@@ -2615,9 +2762,9 @@ static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
2615{ 2762{
2616 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 2763 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2617 struct sigmatel_spec *spec = codec->spec; 2764 struct sigmatel_spec *spec = codec->spec;
2618 hda_nid_t nid = kcontrol->private_value >> 8; 2765 hda_nid_t nid = kcontrol->private_value;
2619 int io_idx = kcontrol-> private_value & 0xff; 2766 int io_idx = (nid == spec->mic_switch) ? 1 : 0;
2620 unsigned short val = !!ucontrol->value.integer.value[0]; 2767 unsigned short val = !!ucontrol->value.enumerated.item[0];
2621 2768
2622 spec->io_switch[io_idx] = val; 2769 spec->io_switch[io_idx] = val;
2623 2770
@@ -2626,7 +2773,7 @@ static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
2626 else { 2773 else {
2627 unsigned int pinctl = AC_PINCTL_IN_EN; 2774 unsigned int pinctl = AC_PINCTL_IN_EN;
2628 if (io_idx) /* set VREF for mic */ 2775 if (io_idx) /* set VREF for mic */
2629 pinctl |= stac92xx_get_vref(codec, nid); 2776 pinctl |= stac92xx_get_default_vref(codec, nid);
2630 stac92xx_auto_set_pinctl(codec, nid, pinctl); 2777 stac92xx_auto_set_pinctl(codec, nid, pinctl);
2631 } 2778 }
2632 2779
@@ -2707,7 +2854,8 @@ enum {
2707 STAC_CTL_WIDGET_AMP_VOL, 2854 STAC_CTL_WIDGET_AMP_VOL,
2708 STAC_CTL_WIDGET_HP_SWITCH, 2855 STAC_CTL_WIDGET_HP_SWITCH,
2709 STAC_CTL_WIDGET_IO_SWITCH, 2856 STAC_CTL_WIDGET_IO_SWITCH,
2710 STAC_CTL_WIDGET_CLFE_SWITCH 2857 STAC_CTL_WIDGET_CLFE_SWITCH,
2858 STAC_CTL_WIDGET_DC_BIAS
2711}; 2859};
2712 2860
2713static struct snd_kcontrol_new stac92xx_control_templates[] = { 2861static struct snd_kcontrol_new stac92xx_control_templates[] = {
@@ -2719,6 +2867,7 @@ static struct snd_kcontrol_new stac92xx_control_templates[] = {
2719 STAC_CODEC_HP_SWITCH(NULL), 2867 STAC_CODEC_HP_SWITCH(NULL),
2720 STAC_CODEC_IO_SWITCH(NULL, 0), 2868 STAC_CODEC_IO_SWITCH(NULL, 0),
2721 STAC_CODEC_CLFE_SWITCH(NULL, 0), 2869 STAC_CODEC_CLFE_SWITCH(NULL, 0),
2870 DC_BIAS(NULL, 0, 0),
2722}; 2871};
2723 2872
2724/* add dynamic controls */ 2873/* add dynamic controls */
@@ -2782,6 +2931,34 @@ static struct snd_kcontrol_new stac_input_src_temp = {
2782 .put = stac92xx_mux_enum_put, 2931 .put = stac92xx_mux_enum_put,
2783}; 2932};
2784 2933
2934static inline int stac92xx_add_jack_mode_control(struct hda_codec *codec,
2935 hda_nid_t nid, int idx)
2936{
2937 int def_conf = snd_hda_codec_get_pincfg(codec, nid);
2938 int control = 0;
2939 struct sigmatel_spec *spec = codec->spec;
2940 char name[22];
2941
2942 if (!((get_defcfg_connect(def_conf)) & AC_JACK_PORT_FIXED)) {
2943 if (stac92xx_get_default_vref(codec, nid) == AC_PINCTL_VREF_GRD
2944 && nid == spec->line_switch)
2945 control = STAC_CTL_WIDGET_IO_SWITCH;
2946 else if (snd_hda_query_pin_caps(codec, nid)
2947 & (AC_PINCAP_VREF_GRD << AC_PINCAP_VREF_SHIFT))
2948 control = STAC_CTL_WIDGET_DC_BIAS;
2949 else if (nid == spec->mic_switch)
2950 control = STAC_CTL_WIDGET_IO_SWITCH;
2951 }
2952
2953 if (control) {
2954 strcpy(name, auto_pin_cfg_labels[idx]);
2955 return stac92xx_add_control(codec->spec, control,
2956 strcat(name, " Jack Mode"), nid);
2957 }
2958
2959 return 0;
2960}
2961
2785static int stac92xx_add_input_source(struct sigmatel_spec *spec) 2962static int stac92xx_add_input_source(struct sigmatel_spec *spec)
2786{ 2963{
2787 struct snd_kcontrol_new *knew; 2964 struct snd_kcontrol_new *knew;
@@ -3144,7 +3321,9 @@ static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
3144 const struct auto_pin_cfg *cfg) 3321 const struct auto_pin_cfg *cfg)
3145{ 3322{
3146 struct sigmatel_spec *spec = codec->spec; 3323 struct sigmatel_spec *spec = codec->spec;
3324 hda_nid_t nid;
3147 int err; 3325 int err;
3326 int idx;
3148 3327
3149 err = create_multi_out_ctls(codec, cfg->line_outs, cfg->line_out_pins, 3328 err = create_multi_out_ctls(codec, cfg->line_outs, cfg->line_out_pins,
3150 spec->multiout.dac_nids, 3329 spec->multiout.dac_nids,
@@ -3161,20 +3340,13 @@ static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
3161 return err; 3340 return err;
3162 } 3341 }
3163 3342
3164 if (spec->line_switch) { 3343 for (idx = AUTO_PIN_MIC; idx <= AUTO_PIN_FRONT_LINE; idx++) {
3165 err = stac92xx_add_control(spec, STAC_CTL_WIDGET_IO_SWITCH, 3344 nid = cfg->input_pins[idx];
3166 "Line In as Output Switch", 3345 if (nid) {
3167 spec->line_switch << 8); 3346 err = stac92xx_add_jack_mode_control(codec, nid, idx);
3168 if (err < 0) 3347 if (err < 0)
3169 return err; 3348 return err;
3170 } 3349 }
3171
3172 if (spec->mic_switch) {
3173 err = stac92xx_add_control(spec, STAC_CTL_WIDGET_IO_SWITCH,
3174 "Mic as Output Switch",
3175 (spec->mic_switch << 8) | 1);
3176 if (err < 0)
3177 return err;
3178 } 3350 }
3179 3351
3180 return 0; 3352 return 0;
@@ -3639,6 +3811,8 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3639 err = snd_hda_attach_beep_device(codec, nid); 3811 err = snd_hda_attach_beep_device(codec, nid);
3640 if (err < 0) 3812 if (err < 0)
3641 return err; 3813 return err;
3814 /* IDT/STAC codecs have linear beep tone parameter */
3815 codec->beep->linear_tone = 1;
3642 /* if no beep switch is available, make its own one */ 3816 /* if no beep switch is available, make its own one */
3643 caps = query_amp_caps(codec, nid, HDA_OUTPUT); 3817 caps = query_amp_caps(codec, nid, HDA_OUTPUT);
3644 if (codec->beep && 3818 if (codec->beep &&
@@ -4082,7 +4256,7 @@ static int stac92xx_init(struct hda_codec *codec)
4082 unsigned int pinctl, conf; 4256 unsigned int pinctl, conf;
4083 if (i == AUTO_PIN_MIC || i == AUTO_PIN_FRONT_MIC) { 4257 if (i == AUTO_PIN_MIC || i == AUTO_PIN_FRONT_MIC) {
4084 /* for mic pins, force to initialize */ 4258 /* for mic pins, force to initialize */
4085 pinctl = stac92xx_get_vref(codec, nid); 4259 pinctl = stac92xx_get_default_vref(codec, nid);
4086 pinctl |= AC_PINCTL_IN_EN; 4260 pinctl |= AC_PINCTL_IN_EN;
4087 stac92xx_auto_set_pinctl(codec, nid, pinctl); 4261 stac92xx_auto_set_pinctl(codec, nid, pinctl);
4088 } else { 4262 } else {
@@ -4535,17 +4709,19 @@ static int stac92xx_resume(struct hda_codec *codec)
4535 return 0; 4709 return 0;
4536} 4710}
4537 4711
4538
4539/* 4712/*
4540 * using power check for controlling mute led of HP HDX notebooks 4713 * using power check for controlling mute led of HP notebooks
4541 * check for mute state only on Speakers (nid = 0x10) 4714 * check for mute state only on Speakers (nid = 0x10)
4542 * 4715 *
4543 * For this feature CONFIG_SND_HDA_POWER_SAVE is needed, otherwise 4716 * For this feature CONFIG_SND_HDA_POWER_SAVE is needed, otherwise
4544 * the LED is NOT working properly ! 4717 * the LED is NOT working properly !
4718 *
4719 * Changed name to reflect that it now works for any designated
4720 * model, not just HP HDX.
4545 */ 4721 */
4546 4722
4547#ifdef CONFIG_SND_HDA_POWER_SAVE 4723#ifdef CONFIG_SND_HDA_POWER_SAVE
4548static int stac92xx_hp_hdx_check_power_status(struct hda_codec *codec, 4724static int stac92xx_hp_check_power_status(struct hda_codec *codec,
4549 hda_nid_t nid) 4725 hda_nid_t nid)
4550{ 4726{
4551 struct sigmatel_spec *spec = codec->spec; 4727 struct sigmatel_spec *spec = codec->spec;
@@ -4553,9 +4729,9 @@ static int stac92xx_hp_hdx_check_power_status(struct hda_codec *codec,
4553 if (nid == 0x10) { 4729 if (nid == 0x10) {
4554 if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) & 4730 if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
4555 HDA_AMP_MUTE) 4731 HDA_AMP_MUTE)
4556 spec->gpio_data &= ~0x08; /* orange */ 4732 spec->gpio_data &= ~spec->gpio_led; /* orange */
4557 else 4733 else
4558 spec->gpio_data |= 0x08; /* white */ 4734 spec->gpio_data |= spec->gpio_led; /* white */
4559 4735
4560 stac_gpio_set(codec, spec->gpio_mask, 4736 stac_gpio_set(codec, spec->gpio_mask,
4561 spec->gpio_dir, 4737 spec->gpio_dir,
@@ -5201,6 +5377,15 @@ again:
5201 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) 5377 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
5202 snd_hda_sequence_write_cache(codec, unmute_init); 5378 snd_hda_sequence_write_cache(codec, unmute_init);
5203 5379
5380 /* Some HP machines seem to have unstable codec communications
5381 * especially with ATI fglrx driver. For recovering from the
5382 * CORB/RIRB stall, allow the BUS reset and keep always sync
5383 */
5384 if (spec->board_config == STAC_HP_DV5) {
5385 codec->bus->sync_write = 1;
5386 codec->bus->allow_bus_reset = 1;
5387 }
5388
5204 spec->aloopback_ctl = stac92hd71bxx_loopback; 5389 spec->aloopback_ctl = stac92hd71bxx_loopback;
5205 spec->aloopback_mask = 0x50; 5390 spec->aloopback_mask = 0x50;
5206 spec->aloopback_shift = 0; 5391 spec->aloopback_shift = 0;
@@ -5234,6 +5419,15 @@ again:
5234 spec->num_smuxes = 0; 5419 spec->num_smuxes = 0;
5235 spec->num_dmuxes = 1; 5420 spec->num_dmuxes = 1;
5236 break; 5421 break;
5422 case STAC_HP_DV4_1222NR:
5423 spec->num_dmics = 1;
5424 /* I don't know if it needs 1 or 2 smuxes - will wait for
5425 * bug reports to fix if needed
5426 */
5427 spec->num_smuxes = 1;
5428 spec->num_dmuxes = 1;
5429 spec->gpio_led = 0x01;
5430 /* fallthrough */
5237 case STAC_HP_DV5: 5431 case STAC_HP_DV5:
5238 snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010); 5432 snd_hda_codec_set_pincfg(codec, 0x0d, 0x90170010);
5239 stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN); 5433 stac92xx_auto_set_pinctl(codec, 0x0d, AC_PINCTL_OUT_EN);
@@ -5242,22 +5436,21 @@ again:
5242 spec->num_dmics = 1; 5436 spec->num_dmics = 1;
5243 spec->num_dmuxes = 1; 5437 spec->num_dmuxes = 1;
5244 spec->num_smuxes = 1; 5438 spec->num_smuxes = 1;
5245 /*
5246 * For controlling MUTE LED on HP HDX16/HDX18 notebooks,
5247 * the CONFIG_SND_HDA_POWER_SAVE is needed to be set.
5248 */
5249#ifdef CONFIG_SND_HDA_POWER_SAVE
5250 /* orange/white mute led on GPIO3, orange=0, white=1 */ 5439 /* orange/white mute led on GPIO3, orange=0, white=1 */
5251 spec->gpio_mask |= 0x08; 5440 spec->gpio_led = 0x08;
5252 spec->gpio_dir |= 0x08; 5441 break;
5253 spec->gpio_data |= 0x08; /* set to white */ 5442 }
5254 5443
5444#ifdef CONFIG_SND_HDA_POWER_SAVE
5445 if (spec->gpio_led) {
5446 spec->gpio_mask |= spec->gpio_led;
5447 spec->gpio_dir |= spec->gpio_led;
5448 spec->gpio_data |= spec->gpio_led;
5255 /* register check_power_status callback. */ 5449 /* register check_power_status callback. */
5256 codec->patch_ops.check_power_status = 5450 codec->patch_ops.check_power_status =
5257 stac92xx_hp_hdx_check_power_status; 5451 stac92xx_hp_check_power_status;
5452 }
5258#endif 5453#endif
5259 break;
5260 };
5261 5454
5262 spec->multiout.dac_nids = spec->dac_nids; 5455 spec->multiout.dac_nids = spec->dac_nids;
5263 if (spec->dinput_mux) 5456 if (spec->dinput_mux)
@@ -5282,7 +5475,7 @@ again:
5282 codec->proc_widget_hook = stac92hd7x_proc_hook; 5475 codec->proc_widget_hook = stac92hd7x_proc_hook;
5283 5476
5284 return 0; 5477 return 0;
5285}; 5478}
5286 5479
5287static int patch_stac922x(struct hda_codec *codec) 5480static int patch_stac922x(struct hda_codec *codec)
5288{ 5481{
@@ -5437,7 +5630,7 @@ static int patch_stac927x(struct hda_codec *codec)
5437 /* correct the device field to SPDIF out */ 5630 /* correct the device field to SPDIF out */
5438 snd_hda_codec_set_pincfg(codec, 0x21, 0x01442070); 5631 snd_hda_codec_set_pincfg(codec, 0x21, 0x01442070);
5439 break; 5632 break;
5440 }; 5633 }
5441 /* configure the analog microphone on some laptops */ 5634 /* configure the analog microphone on some laptops */
5442 snd_hda_codec_set_pincfg(codec, 0x0c, 0x90a79130); 5635 snd_hda_codec_set_pincfg(codec, 0x0c, 0x90a79130);
5443 /* correct the front output jack as a hp out */ 5636 /* correct the front output jack as a hp out */
@@ -5747,6 +5940,7 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
5747 { .id = 0x83847661, .name = "CXD9872RD/K", .patch = patch_stac9872 }, 5940 { .id = 0x83847661, .name = "CXD9872RD/K", .patch = patch_stac9872 },
5748 { .id = 0x83847662, .name = "STAC9872AK", .patch = patch_stac9872 }, 5941 { .id = 0x83847662, .name = "STAC9872AK", .patch = patch_stac9872 },
5749 { .id = 0x83847664, .name = "CXD9872AKD", .patch = patch_stac9872 }, 5942 { .id = 0x83847664, .name = "CXD9872AKD", .patch = patch_stac9872 },
5943 { .id = 0x83847698, .name = "STAC9205", .patch = patch_stac9205 },
5750 { .id = 0x838476a0, .name = "STAC9205", .patch = patch_stac9205 }, 5944 { .id = 0x838476a0, .name = "STAC9205", .patch = patch_stac9205 },
5751 { .id = 0x838476a1, .name = "STAC9205D", .patch = patch_stac9205 }, 5945 { .id = 0x838476a1, .name = "STAC9205D", .patch = patch_stac9205 },
5752 { .id = 0x838476a2, .name = "STAC9204", .patch = patch_stac9205 }, 5946 { .id = 0x838476a2, .name = "STAC9204", .patch = patch_stac9205 },
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index b25a5cc637d6..8e004fb6961a 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -205,7 +205,7 @@ struct via_spec {
205 205
206 /* playback */ 206 /* playback */
207 struct hda_multi_out multiout; 207 struct hda_multi_out multiout;
208 hda_nid_t extra_dig_out_nid; 208 hda_nid_t slave_dig_outs[2];
209 209
210 /* capture */ 210 /* capture */
211 unsigned int num_adc_nids; 211 unsigned int num_adc_nids;
@@ -731,21 +731,6 @@ static int via_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
731 return snd_hda_multi_out_dig_close(codec, &spec->multiout); 731 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
732} 732}
733 733
734/* setup SPDIF output stream */
735static void setup_dig_playback_stream(struct hda_codec *codec, hda_nid_t nid,
736 unsigned int stream_tag, unsigned int format)
737{
738 /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */
739 if (codec->spdif_ctls & AC_DIG1_ENABLE)
740 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_DIGI_CONVERT_1,
741 codec->spdif_ctls & ~AC_DIG1_ENABLE & 0xff);
742 snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
743 /* turn on again (if needed) */
744 if (codec->spdif_ctls & AC_DIG1_ENABLE)
745 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_DIGI_CONVERT_1,
746 codec->spdif_ctls & 0xff);
747}
748
749static int via_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, 734static int via_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
750 struct hda_codec *codec, 735 struct hda_codec *codec,
751 unsigned int stream_tag, 736 unsigned int stream_tag,
@@ -753,19 +738,16 @@ static int via_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
753 struct snd_pcm_substream *substream) 738 struct snd_pcm_substream *substream)
754{ 739{
755 struct via_spec *spec = codec->spec; 740 struct via_spec *spec = codec->spec;
756 hda_nid_t nid; 741 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
757 742 stream_tag, format, substream);
758 /* 1st or 2nd S/PDIF */ 743}
759 if (substream->number == 0)
760 nid = spec->multiout.dig_out_nid;
761 else if (substream->number == 1)
762 nid = spec->extra_dig_out_nid;
763 else
764 return -1;
765 744
766 mutex_lock(&codec->spdif_mutex); 745static int via_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
767 setup_dig_playback_stream(codec, nid, stream_tag, format); 746 struct hda_codec *codec,
768 mutex_unlock(&codec->spdif_mutex); 747 struct snd_pcm_substream *substream)
748{
749 struct via_spec *spec = codec->spec;
750 snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
769 return 0; 751 return 0;
770} 752}
771 753
@@ -842,7 +824,8 @@ static struct hda_pcm_stream vt1708_pcm_digital_playback = {
842 .ops = { 824 .ops = {
843 .open = via_dig_playback_pcm_open, 825 .open = via_dig_playback_pcm_open,
844 .close = via_dig_playback_pcm_close, 826 .close = via_dig_playback_pcm_close,
845 .prepare = via_dig_playback_pcm_prepare 827 .prepare = via_dig_playback_pcm_prepare,
828 .cleanup = via_dig_playback_pcm_cleanup
846 }, 829 },
847}; 830};
848 831
@@ -874,13 +857,6 @@ static int via_build_controls(struct hda_codec *codec)
874 if (err < 0) 857 if (err < 0)
875 return err; 858 return err;
876 spec->multiout.share_spdif = 1; 859 spec->multiout.share_spdif = 1;
877
878 if (spec->extra_dig_out_nid) {
879 err = snd_hda_create_spdif_out_ctls(codec,
880 spec->extra_dig_out_nid);
881 if (err < 0)
882 return err;
883 }
884 } 860 }
885 if (spec->dig_in_nid) { 861 if (spec->dig_in_nid) {
886 err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid); 862 err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid);
@@ -1013,10 +989,6 @@ static void via_unsol_event(struct hda_codec *codec,
1013 via_gpio_control(codec); 989 via_gpio_control(codec);
1014} 990}
1015 991
1016static hda_nid_t slave_dig_outs[] = {
1017 0,
1018};
1019
1020static int via_init(struct hda_codec *codec) 992static int via_init(struct hda_codec *codec)
1021{ 993{
1022 struct via_spec *spec = codec->spec; 994 struct via_spec *spec = codec->spec;
@@ -1051,8 +1023,9 @@ static int via_init(struct hda_codec *codec)
1051 snd_hda_codec_write(codec, spec->autocfg.dig_in_pin, 0, 1023 snd_hda_codec_write(codec, spec->autocfg.dig_in_pin, 0,
1052 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN); 1024 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN);
1053 1025
1054 /* no slave outs */ 1026 /* assign slave outs */
1055 codec->slave_dig_outs = slave_dig_outs; 1027 if (spec->slave_dig_outs[0])
1028 codec->slave_dig_outs = spec->slave_dig_outs;
1056 1029
1057 return 0; 1030 return 0;
1058} 1031}
@@ -2134,7 +2107,8 @@ static struct hda_pcm_stream vt1708B_pcm_digital_playback = {
2134 .ops = { 2107 .ops = {
2135 .open = via_dig_playback_pcm_open, 2108 .open = via_dig_playback_pcm_open,
2136 .close = via_dig_playback_pcm_close, 2109 .close = via_dig_playback_pcm_close,
2137 .prepare = via_dig_playback_pcm_prepare 2110 .prepare = via_dig_playback_pcm_prepare,
2111 .cleanup = via_dig_playback_pcm_cleanup
2138 }, 2112 },
2139}; 2113};
2140 2114
@@ -2589,14 +2563,15 @@ static struct hda_pcm_stream vt1708S_pcm_analog_capture = {
2589}; 2563};
2590 2564
2591static struct hda_pcm_stream vt1708S_pcm_digital_playback = { 2565static struct hda_pcm_stream vt1708S_pcm_digital_playback = {
2592 .substreams = 2, 2566 .substreams = 1,
2593 .channels_min = 2, 2567 .channels_min = 2,
2594 .channels_max = 2, 2568 .channels_max = 2,
2595 /* NID is set in via_build_pcms */ 2569 /* NID is set in via_build_pcms */
2596 .ops = { 2570 .ops = {
2597 .open = via_dig_playback_pcm_open, 2571 .open = via_dig_playback_pcm_open,
2598 .close = via_dig_playback_pcm_close, 2572 .close = via_dig_playback_pcm_close,
2599 .prepare = via_dig_playback_pcm_prepare 2573 .prepare = via_dig_playback_pcm_prepare,
2574 .cleanup = via_dig_playback_pcm_cleanup
2600 }, 2575 },
2601}; 2576};
2602 2577
@@ -2805,14 +2780,37 @@ static int vt1708S_auto_create_analog_input_ctls(struct via_spec *spec,
2805 return 0; 2780 return 0;
2806} 2781}
2807 2782
2783/* fill out digital output widgets; one for master and one for slave outputs */
2784static void fill_dig_outs(struct hda_codec *codec)
2785{
2786 struct via_spec *spec = codec->spec;
2787 int i;
2788
2789 for (i = 0; i < spec->autocfg.dig_outs; i++) {
2790 hda_nid_t nid;
2791 int conn;
2792
2793 nid = spec->autocfg.dig_out_pins[i];
2794 if (!nid)
2795 continue;
2796 conn = snd_hda_get_connections(codec, nid, &nid, 1);
2797 if (conn < 1)
2798 continue;
2799 if (!spec->multiout.dig_out_nid)
2800 spec->multiout.dig_out_nid = nid;
2801 else {
2802 spec->slave_dig_outs[0] = nid;
2803 break; /* at most two dig outs */
2804 }
2805 }
2806}
2807
2808static int vt1708S_parse_auto_config(struct hda_codec *codec) 2808static int vt1708S_parse_auto_config(struct hda_codec *codec)
2809{ 2809{
2810 struct via_spec *spec = codec->spec; 2810 struct via_spec *spec = codec->spec;
2811 int err; 2811 int err;
2812 static hda_nid_t vt1708s_ignore[] = {0x21, 0};
2813 2812
2814 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, 2813 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
2815 vt1708s_ignore);
2816 if (err < 0) 2814 if (err < 0)
2817 return err; 2815 return err;
2818 err = vt1708S_auto_fill_dac_nids(spec, &spec->autocfg); 2816 err = vt1708S_auto_fill_dac_nids(spec, &spec->autocfg);
@@ -2833,10 +2831,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec)
2833 2831
2834 spec->multiout.max_channels = spec->multiout.num_dacs * 2; 2832 spec->multiout.max_channels = spec->multiout.num_dacs * 2;
2835 2833
2836 if (spec->autocfg.dig_outs) 2834 fill_dig_outs(codec);
2837 spec->multiout.dig_out_nid = VT1708S_DIGOUT_NID;
2838
2839 spec->extra_dig_out_nid = 0x15;
2840 2835
2841 if (spec->kctls.list) 2836 if (spec->kctls.list)
2842 spec->mixers[spec->num_mixers++] = spec->kctls.list; 2837 spec->mixers[spec->num_mixers++] = spec->kctls.list;
@@ -3000,7 +2995,8 @@ static struct hda_pcm_stream vt1702_pcm_digital_playback = {
3000 .ops = { 2995 .ops = {
3001 .open = via_dig_playback_pcm_open, 2996 .open = via_dig_playback_pcm_open,
3002 .close = via_dig_playback_pcm_close, 2997 .close = via_dig_playback_pcm_close,
3003 .prepare = via_dig_playback_pcm_prepare 2998 .prepare = via_dig_playback_pcm_prepare,
2999 .cleanup = via_dig_playback_pcm_cleanup
3004 }, 3000 },
3005}; 3001};
3006 3002
@@ -3128,10 +3124,8 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
3128{ 3124{
3129 struct via_spec *spec = codec->spec; 3125 struct via_spec *spec = codec->spec;
3130 int err; 3126 int err;
3131 static hda_nid_t vt1702_ignore[] = {0x1C, 0};
3132 3127
3133 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, 3128 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
3134 vt1702_ignore);
3135 if (err < 0) 3129 if (err < 0)
3136 return err; 3130 return err;
3137 err = vt1702_auto_fill_dac_nids(spec, &spec->autocfg); 3131 err = vt1702_auto_fill_dac_nids(spec, &spec->autocfg);
@@ -3152,10 +3146,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
3152 3146
3153 spec->multiout.max_channels = spec->multiout.num_dacs * 2; 3147 spec->multiout.max_channels = spec->multiout.num_dacs * 2;
3154 3148
3155 if (spec->autocfg.dig_outs) 3149 fill_dig_outs(codec);
3156 spec->multiout.dig_out_nid = VT1702_DIGOUT_NID;
3157
3158 spec->extra_dig_out_nid = 0x1B;
3159 3150
3160 if (spec->kctls.list) 3151 if (spec->kctls.list)
3161 spec->mixers[spec->num_mixers++] = spec->kctls.list; 3152 spec->mixers[spec->num_mixers++] = spec->kctls.list;
diff --git a/sound/pci/ice1712/Makefile b/sound/pci/ice1712/Makefile
index f99fe089495d..536eae2ccf94 100644
--- a/sound/pci/ice1712/Makefile
+++ b/sound/pci/ice1712/Makefile
@@ -5,7 +5,7 @@
5 5
6snd-ice17xx-ak4xxx-objs := ak4xxx.o 6snd-ice17xx-ak4xxx-objs := ak4xxx.o
7snd-ice1712-objs := ice1712.o delta.o hoontech.o ews.o 7snd-ice1712-objs := ice1712.o delta.o hoontech.o ews.o
8snd-ice1724-objs := ice1724.o amp.o revo.o aureon.o vt1720_mobo.o pontis.o prodigy192.o prodigy_hifi.o juli.o phase.o wtm.o se.o 8snd-ice1724-objs := ice1724.o amp.o revo.o aureon.o vt1720_mobo.o pontis.o prodigy192.o prodigy_hifi.o juli.o phase.o wtm.o se.o maya44.o
9 9
10# Toplevel Module Dependency 10# Toplevel Module Dependency
11obj-$(CONFIG_SND_ICE1712) += snd-ice1712.o snd-ice17xx-ak4xxx.o 11obj-$(CONFIG_SND_ICE1712) += snd-ice1712.o snd-ice17xx-ak4xxx.o
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index fdae6deba16b..adc909ec125c 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -335,6 +335,7 @@ struct snd_ice1712 {
335 unsigned int force_rdma1:1; /* VT1720/4 - RDMA1 as non-spdif */ 335 unsigned int force_rdma1:1; /* VT1720/4 - RDMA1 as non-spdif */
336 unsigned int midi_output:1; /* VT1720/4: MIDI output triggered */ 336 unsigned int midi_output:1; /* VT1720/4: MIDI output triggered */
337 unsigned int midi_input:1; /* VT1720/4: MIDI input triggered */ 337 unsigned int midi_input:1; /* VT1720/4: MIDI input triggered */
338 unsigned int own_routing:1; /* VT1720/4: use own routing ctls */
338 unsigned int num_total_dacs; /* total DACs */ 339 unsigned int num_total_dacs; /* total DACs */
339 unsigned int num_total_adcs; /* total ADCs */ 340 unsigned int num_total_adcs; /* total ADCs */
340 unsigned int cur_rate; /* current rate */ 341 unsigned int cur_rate; /* current rate */
@@ -458,10 +459,17 @@ static inline int snd_ice1712_gpio_read_bits(struct snd_ice1712 *ice,
458 return snd_ice1712_gpio_read(ice) & mask; 459 return snd_ice1712_gpio_read(ice) & mask;
459} 460}
460 461
462/* route access functions */
463int snd_ice1724_get_route_val(struct snd_ice1712 *ice, int shift);
464int snd_ice1724_put_route_val(struct snd_ice1712 *ice, unsigned int val,
465 int shift);
466
461int snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice); 467int snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice);
462 468
463int snd_ice1712_akm4xxx_init(struct snd_akm4xxx *ak, const struct snd_akm4xxx *template, 469int snd_ice1712_akm4xxx_init(struct snd_akm4xxx *ak,
464 const struct snd_ak4xxx_private *priv, struct snd_ice1712 *ice); 470 const struct snd_akm4xxx *template,
471 const struct snd_ak4xxx_private *priv,
472 struct snd_ice1712 *ice);
465void snd_ice1712_akm4xxx_free(struct snd_ice1712 *ice); 473void snd_ice1712_akm4xxx_free(struct snd_ice1712 *ice);
466int snd_ice1712_akm4xxx_build_controls(struct snd_ice1712 *ice); 474int snd_ice1712_akm4xxx_build_controls(struct snd_ice1712 *ice);
467 475
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 128510e77a78..36ade77cf371 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -49,6 +49,7 @@
49#include "prodigy192.h" 49#include "prodigy192.h"
50#include "prodigy_hifi.h" 50#include "prodigy_hifi.h"
51#include "juli.h" 51#include "juli.h"
52#include "maya44.h"
52#include "phase.h" 53#include "phase.h"
53#include "wtm.h" 54#include "wtm.h"
54#include "se.h" 55#include "se.h"
@@ -65,6 +66,7 @@ MODULE_SUPPORTED_DEVICE("{"
65 PRODIGY192_DEVICE_DESC 66 PRODIGY192_DEVICE_DESC
66 PRODIGY_HIFI_DEVICE_DESC 67 PRODIGY_HIFI_DEVICE_DESC
67 JULI_DEVICE_DESC 68 JULI_DEVICE_DESC
69 MAYA44_DEVICE_DESC
68 PHASE_DEVICE_DESC 70 PHASE_DEVICE_DESC
69 WTM_DEVICE_DESC 71 WTM_DEVICE_DESC
70 SE_DEVICE_DESC 72 SE_DEVICE_DESC
@@ -626,7 +628,7 @@ static unsigned char stdclock_set_mclk(struct snd_ice1712 *ice,
626 return 0; 628 return 0;
627} 629}
628 630
629static void snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate, 631static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
630 int force) 632 int force)
631{ 633{
632 unsigned long flags; 634 unsigned long flags;
@@ -634,17 +636,18 @@ static void snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
634 unsigned int i, old_rate; 636 unsigned int i, old_rate;
635 637
636 if (rate > ice->hw_rates->list[ice->hw_rates->count - 1]) 638 if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
637 return; 639 return -EINVAL;
640
638 spin_lock_irqsave(&ice->reg_lock, flags); 641 spin_lock_irqsave(&ice->reg_lock, flags);
639 if ((inb(ICEMT1724(ice, DMA_CONTROL)) & DMA_STARTS) || 642 if ((inb(ICEMT1724(ice, DMA_CONTROL)) & DMA_STARTS) ||
640 (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) { 643 (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) {
641 /* running? we cannot change the rate now... */ 644 /* running? we cannot change the rate now... */
642 spin_unlock_irqrestore(&ice->reg_lock, flags); 645 spin_unlock_irqrestore(&ice->reg_lock, flags);
643 return; 646 return -EBUSY;
644 } 647 }
645 if (!force && is_pro_rate_locked(ice)) { 648 if (!force && is_pro_rate_locked(ice)) {
646 spin_unlock_irqrestore(&ice->reg_lock, flags); 649 spin_unlock_irqrestore(&ice->reg_lock, flags);
647 return; 650 return (rate == ice->cur_rate) ? 0 : -EBUSY;
648 } 651 }
649 652
650 old_rate = ice->get_rate(ice); 653 old_rate = ice->get_rate(ice);
@@ -652,7 +655,7 @@ static void snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
652 ice->set_rate(ice, rate); 655 ice->set_rate(ice, rate);
653 else if (rate == ice->cur_rate) { 656 else if (rate == ice->cur_rate) {
654 spin_unlock_irqrestore(&ice->reg_lock, flags); 657 spin_unlock_irqrestore(&ice->reg_lock, flags);
655 return; 658 return 0;
656 } 659 }
657 660
658 ice->cur_rate = rate; 661 ice->cur_rate = rate;
@@ -674,13 +677,15 @@ static void snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
674 } 677 }
675 if (ice->spdif.ops.setup_rate) 678 if (ice->spdif.ops.setup_rate)
676 ice->spdif.ops.setup_rate(ice, rate); 679 ice->spdif.ops.setup_rate(ice, rate);
680
681 return 0;
677} 682}
678 683
679static int snd_vt1724_pcm_hw_params(struct snd_pcm_substream *substream, 684static int snd_vt1724_pcm_hw_params(struct snd_pcm_substream *substream,
680 struct snd_pcm_hw_params *hw_params) 685 struct snd_pcm_hw_params *hw_params)
681{ 686{
682 struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); 687 struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
683 int i, chs; 688 int i, chs, err;
684 689
685 chs = params_channels(hw_params); 690 chs = params_channels(hw_params);
686 mutex_lock(&ice->open_mutex); 691 mutex_lock(&ice->open_mutex);
@@ -715,7 +720,11 @@ static int snd_vt1724_pcm_hw_params(struct snd_pcm_substream *substream,
715 } 720 }
716 } 721 }
717 mutex_unlock(&ice->open_mutex); 722 mutex_unlock(&ice->open_mutex);
718 snd_vt1724_set_pro_rate(ice, params_rate(hw_params), 0); 723
724 err = snd_vt1724_set_pro_rate(ice, params_rate(hw_params), 0);
725 if (err < 0)
726 return err;
727
719 return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); 728 return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
720} 729}
721 730
@@ -848,20 +857,39 @@ static snd_pcm_uframes_t snd_vt1724_pcm_pointer(struct snd_pcm_substream *substr
848#endif 857#endif
849} 858}
850 859
851static const struct vt1724_pcm_reg vt1724_playback_pro_reg = { 860static const struct vt1724_pcm_reg vt1724_pdma0_reg = {
852 .addr = VT1724_MT_PLAYBACK_ADDR, 861 .addr = VT1724_MT_PLAYBACK_ADDR,
853 .size = VT1724_MT_PLAYBACK_SIZE, 862 .size = VT1724_MT_PLAYBACK_SIZE,
854 .count = VT1724_MT_PLAYBACK_COUNT, 863 .count = VT1724_MT_PLAYBACK_COUNT,
855 .start = VT1724_PDMA0_START, 864 .start = VT1724_PDMA0_START,
856}; 865};
857 866
858static const struct vt1724_pcm_reg vt1724_capture_pro_reg = { 867static const struct vt1724_pcm_reg vt1724_pdma4_reg = {
868 .addr = VT1724_MT_PDMA4_ADDR,
869 .size = VT1724_MT_PDMA4_SIZE,
870 .count = VT1724_MT_PDMA4_COUNT,
871 .start = VT1724_PDMA4_START,
872};
873
874static const struct vt1724_pcm_reg vt1724_rdma0_reg = {
859 .addr = VT1724_MT_CAPTURE_ADDR, 875 .addr = VT1724_MT_CAPTURE_ADDR,
860 .size = VT1724_MT_CAPTURE_SIZE, 876 .size = VT1724_MT_CAPTURE_SIZE,
861 .count = VT1724_MT_CAPTURE_COUNT, 877 .count = VT1724_MT_CAPTURE_COUNT,
862 .start = VT1724_RDMA0_START, 878 .start = VT1724_RDMA0_START,
863}; 879};
864 880
881static const struct vt1724_pcm_reg vt1724_rdma1_reg = {
882 .addr = VT1724_MT_RDMA1_ADDR,
883 .size = VT1724_MT_RDMA1_SIZE,
884 .count = VT1724_MT_RDMA1_COUNT,
885 .start = VT1724_RDMA1_START,
886};
887
888#define vt1724_playback_pro_reg vt1724_pdma0_reg
889#define vt1724_playback_spdif_reg vt1724_pdma4_reg
890#define vt1724_capture_pro_reg vt1724_rdma0_reg
891#define vt1724_capture_spdif_reg vt1724_rdma1_reg
892
865static const struct snd_pcm_hardware snd_vt1724_playback_pro = { 893static const struct snd_pcm_hardware snd_vt1724_playback_pro = {
866 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 894 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
867 SNDRV_PCM_INFO_BLOCK_TRANSFER | 895 SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -1077,20 +1105,6 @@ static int __devinit snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
1077 * SPDIF PCM 1105 * SPDIF PCM
1078 */ 1106 */
1079 1107
1080static const struct vt1724_pcm_reg vt1724_playback_spdif_reg = {
1081 .addr = VT1724_MT_PDMA4_ADDR,
1082 .size = VT1724_MT_PDMA4_SIZE,
1083 .count = VT1724_MT_PDMA4_COUNT,
1084 .start = VT1724_PDMA4_START,
1085};
1086
1087static const struct vt1724_pcm_reg vt1724_capture_spdif_reg = {
1088 .addr = VT1724_MT_RDMA1_ADDR,
1089 .size = VT1724_MT_RDMA1_SIZE,
1090 .count = VT1724_MT_RDMA1_COUNT,
1091 .start = VT1724_RDMA1_START,
1092};
1093
1094/* update spdif control bits; call with reg_lock */ 1108/* update spdif control bits; call with reg_lock */
1095static void update_spdif_bits(struct snd_ice1712 *ice, unsigned int val) 1109static void update_spdif_bits(struct snd_ice1712 *ice, unsigned int val)
1096{ 1110{
@@ -1963,7 +1977,7 @@ static inline int digital_route_shift(int idx)
1963 return idx * 3; 1977 return idx * 3;
1964} 1978}
1965 1979
1966static int get_route_val(struct snd_ice1712 *ice, int shift) 1980int snd_ice1724_get_route_val(struct snd_ice1712 *ice, int shift)
1967{ 1981{
1968 unsigned long val; 1982 unsigned long val;
1969 unsigned char eitem; 1983 unsigned char eitem;
@@ -1982,7 +1996,8 @@ static int get_route_val(struct snd_ice1712 *ice, int shift)
1982 return eitem; 1996 return eitem;
1983} 1997}
1984 1998
1985static int put_route_val(struct snd_ice1712 *ice, unsigned int val, int shift) 1999int snd_ice1724_put_route_val(struct snd_ice1712 *ice, unsigned int val,
2000 int shift)
1986{ 2001{
1987 unsigned int old_val, nval; 2002 unsigned int old_val, nval;
1988 int change; 2003 int change;
@@ -2010,7 +2025,7 @@ static int snd_vt1724_pro_route_analog_get(struct snd_kcontrol *kcontrol,
2010 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); 2025 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
2011 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 2026 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2012 ucontrol->value.enumerated.item[0] = 2027 ucontrol->value.enumerated.item[0] =
2013 get_route_val(ice, analog_route_shift(idx)); 2028 snd_ice1724_get_route_val(ice, analog_route_shift(idx));
2014 return 0; 2029 return 0;
2015} 2030}
2016 2031
@@ -2019,8 +2034,9 @@ static int snd_vt1724_pro_route_analog_put(struct snd_kcontrol *kcontrol,
2019{ 2034{
2020 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); 2035 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
2021 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 2036 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2022 return put_route_val(ice, ucontrol->value.enumerated.item[0], 2037 return snd_ice1724_put_route_val(ice,
2023 analog_route_shift(idx)); 2038 ucontrol->value.enumerated.item[0],
2039 analog_route_shift(idx));
2024} 2040}
2025 2041
2026static int snd_vt1724_pro_route_spdif_get(struct snd_kcontrol *kcontrol, 2042static int snd_vt1724_pro_route_spdif_get(struct snd_kcontrol *kcontrol,
@@ -2029,7 +2045,7 @@ static int snd_vt1724_pro_route_spdif_get(struct snd_kcontrol *kcontrol,
2029 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); 2045 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
2030 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 2046 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2031 ucontrol->value.enumerated.item[0] = 2047 ucontrol->value.enumerated.item[0] =
2032 get_route_val(ice, digital_route_shift(idx)); 2048 snd_ice1724_get_route_val(ice, digital_route_shift(idx));
2033 return 0; 2049 return 0;
2034} 2050}
2035 2051
@@ -2038,11 +2054,13 @@ static int snd_vt1724_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
2038{ 2054{
2039 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); 2055 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
2040 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 2056 int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2041 return put_route_val(ice, ucontrol->value.enumerated.item[0], 2057 return snd_ice1724_put_route_val(ice,
2042 digital_route_shift(idx)); 2058 ucontrol->value.enumerated.item[0],
2059 digital_route_shift(idx));
2043} 2060}
2044 2061
2045static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata = { 2062static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata =
2063{
2046 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2064 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2047 .name = "H/W Playback Route", 2065 .name = "H/W Playback Route",
2048 .info = snd_vt1724_pro_route_info, 2066 .info = snd_vt1724_pro_route_info,
@@ -2109,6 +2127,7 @@ static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
2109 snd_vt1724_prodigy_hifi_cards, 2127 snd_vt1724_prodigy_hifi_cards,
2110 snd_vt1724_prodigy192_cards, 2128 snd_vt1724_prodigy192_cards,
2111 snd_vt1724_juli_cards, 2129 snd_vt1724_juli_cards,
2130 snd_vt1724_maya44_cards,
2112 snd_vt1724_phase_cards, 2131 snd_vt1724_phase_cards,
2113 snd_vt1724_wtm_cards, 2132 snd_vt1724_wtm_cards,
2114 snd_vt1724_se_cards, 2133 snd_vt1724_se_cards,
@@ -2246,8 +2265,10 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
2246static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice) 2265static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
2247{ 2266{
2248 outb(VT1724_RESET , ICEREG1724(ice, CONTROL)); 2267 outb(VT1724_RESET , ICEREG1724(ice, CONTROL));
2268 inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
2249 msleep(10); 2269 msleep(10);
2250 outb(0, ICEREG1724(ice, CONTROL)); 2270 outb(0, ICEREG1724(ice, CONTROL));
2271 inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
2251 msleep(10); 2272 msleep(10);
2252} 2273}
2253 2274
@@ -2277,9 +2298,12 @@ static int __devinit snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
2277 if (snd_BUG_ON(!ice->pcm)) 2298 if (snd_BUG_ON(!ice->pcm))
2278 return -EIO; 2299 return -EIO;
2279 2300
2280 err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_mixer_pro_spdif_route, ice)); 2301 if (!ice->own_routing) {
2281 if (err < 0) 2302 err = snd_ctl_add(ice->card,
2282 return err; 2303 snd_ctl_new1(&snd_vt1724_mixer_pro_spdif_route, ice));
2304 if (err < 0)
2305 return err;
2306 }
2283 2307
2284 err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_spdif_switch, ice)); 2308 err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_spdif_switch, ice));
2285 if (err < 0) 2309 if (err < 0)
@@ -2326,7 +2350,7 @@ static int __devinit snd_vt1724_build_controls(struct snd_ice1712 *ice)
2326 if (err < 0) 2350 if (err < 0)
2327 return err; 2351 return err;
2328 2352
2329 if (ice->num_total_dacs > 0) { 2353 if (!ice->own_routing && ice->num_total_dacs > 0) {
2330 struct snd_kcontrol_new tmp = snd_vt1724_mixer_pro_analog_route; 2354 struct snd_kcontrol_new tmp = snd_vt1724_mixer_pro_analog_route;
2331 tmp.count = ice->num_total_dacs; 2355 tmp.count = ice->num_total_dacs;
2332 if (ice->vt1720 && tmp.count > 2) 2356 if (ice->vt1720 && tmp.count > 2)
diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
new file mode 100644
index 000000000000..3e1c20ae2f1c
--- /dev/null
+++ b/sound/pci/ice1712/maya44.c
@@ -0,0 +1,779 @@
1/*
2 * ALSA driver for ICEnsemble VT1724 (Envy24HT)
3 *
4 * Lowlevel functions for ESI Maya44 cards
5 *
6 * Copyright (c) 2009 Takashi Iwai <tiwai@suse.de>
7 * Based on the patches by Rainer Zimmermann <mail@lightshed.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/io.h>
28#include <sound/core.h>
29#include <sound/control.h>
30#include <sound/pcm.h>
31#include <sound/tlv.h>
32
33#include "ice1712.h"
34#include "envy24ht.h"
35#include "maya44.h"
36
37/* WM8776 register indexes */
38#define WM8776_REG_HEADPHONE_L 0x00
39#define WM8776_REG_HEADPHONE_R 0x01
40#define WM8776_REG_HEADPHONE_MASTER 0x02
41#define WM8776_REG_DAC_ATTEN_L 0x03
42#define WM8776_REG_DAC_ATTEN_R 0x04
43#define WM8776_REG_DAC_ATTEN_MASTER 0x05
44#define WM8776_REG_DAC_PHASE 0x06
45#define WM8776_REG_DAC_CONTROL 0x07
46#define WM8776_REG_DAC_MUTE 0x08
47#define WM8776_REG_DAC_DEEMPH 0x09
48#define WM8776_REG_DAC_IF_CONTROL 0x0a
49#define WM8776_REG_ADC_IF_CONTROL 0x0b
50#define WM8776_REG_MASTER_MODE_CONTROL 0x0c
51#define WM8776_REG_POWERDOWN 0x0d
52#define WM8776_REG_ADC_ATTEN_L 0x0e
53#define WM8776_REG_ADC_ATTEN_R 0x0f
54#define WM8776_REG_ADC_ALC1 0x10
55#define WM8776_REG_ADC_ALC2 0x11
56#define WM8776_REG_ADC_ALC3 0x12
57#define WM8776_REG_ADC_NOISE_GATE 0x13
58#define WM8776_REG_ADC_LIMITER 0x14
59#define WM8776_REG_ADC_MUX 0x15
60#define WM8776_REG_OUTPUT_MUX 0x16
61#define WM8776_REG_RESET 0x17
62
63#define WM8776_NUM_REGS 0x18
64
65/* clock ratio identifiers for snd_wm8776_set_rate() */
66#define WM8776_CLOCK_RATIO_128FS 0
67#define WM8776_CLOCK_RATIO_192FS 1
68#define WM8776_CLOCK_RATIO_256FS 2
69#define WM8776_CLOCK_RATIO_384FS 3
70#define WM8776_CLOCK_RATIO_512FS 4
71#define WM8776_CLOCK_RATIO_768FS 5
72
73enum { WM_VOL_HP, WM_VOL_DAC, WM_VOL_ADC, WM_NUM_VOLS };
74enum { WM_SW_DAC, WM_SW_BYPASS, WM_NUM_SWITCHES };
75
/* per-chip state for one of the WM8776 codecs on the Maya44 */
struct snd_wm8776 {
	unsigned char addr;			/* I2C slave address of this codec */
	unsigned short regs[WM8776_NUM_REGS];	/* shadow cache of register writes */
	unsigned char volumes[WM_NUM_VOLS][2];	/* cached L/R values per WM_VOL_* control */
	unsigned int switch_bits;		/* cached on/off state, bit index = WM_SW_* */
};
82
/* card-level private data: the ice1712 instance plus both codecs */
struct snd_maya44 {
	struct snd_ice1712 *ice;	/* owning VT1724 card instance */
	struct snd_wm8776 wm[2];	/* the two WM8776 codecs */
	struct mutex mutex;		/* serializes codec register / GPIO updates */
};
88
89
90/* write the given register and save the data to the cache */
91static void wm8776_write(struct snd_ice1712 *ice, struct snd_wm8776 *wm,
92 unsigned char reg, unsigned short val)
93{
94 /*
95 * WM8776 registers are up to 9 bits wide, bit 8 is placed in the LSB
96 * of the address field
97 */
98 snd_vt1724_write_i2c(ice, wm->addr,
99 (reg << 1) | ((val >> 8) & 1),
100 val & 0xff);
101 wm->regs[reg] = val;
102}
103
104/*
105 * update the given register with and/or mask and save the data to the cache
106 */
107static int wm8776_write_bits(struct snd_ice1712 *ice, struct snd_wm8776 *wm,
108 unsigned char reg,
109 unsigned short mask, unsigned short val)
110{
111 val |= wm->regs[reg] & ~mask;
112 if (val != wm->regs[reg]) {
113 wm8776_write(ice, wm, reg, val);
114 return 1;
115 }
116 return 0;
117}
118
119
120/*
121 * WM8776 volume controls
122 */
123
124struct maya_vol_info {
125 unsigned int maxval; /* volume range: 0..maxval */
126 unsigned char regs[2]; /* left and right registers */
127 unsigned short mask; /* value mask */
128 unsigned short offset; /* zero-value offset */
129 unsigned short mute; /* mute bit */
130 unsigned short update; /* update bits */
131 unsigned char mux_bits[2]; /* extra bits for ADC mute */
132};
133
134static struct maya_vol_info vol_info[WM_NUM_VOLS] = {
135 [WM_VOL_HP] = {
136 .maxval = 80,
137 .regs = { WM8776_REG_HEADPHONE_L, WM8776_REG_HEADPHONE_R },
138 .mask = 0x7f,
139 .offset = 0x30,
140 .mute = 0x00,
141 .update = 0x180, /* update and zero-cross enable */
142 },
143 [WM_VOL_DAC] = {
144 .maxval = 255,
145 .regs = { WM8776_REG_DAC_ATTEN_L, WM8776_REG_DAC_ATTEN_R },
146 .mask = 0xff,
147 .offset = 0x01,
148 .mute = 0x00,
149 .update = 0x100, /* zero-cross enable */
150 },
151 [WM_VOL_ADC] = {
152 .maxval = 91,
153 .regs = { WM8776_REG_ADC_ATTEN_L, WM8776_REG_ADC_ATTEN_R },
154 .mask = 0xff,
155 .offset = 0xa5,
156 .mute = 0xa5,
157 .update = 0x100, /* update */
158 .mux_bits = { 0x80, 0x40 }, /* ADCMUX bits */
159 },
160};
161
162/*
163 * dB tables
164 */
165/* headphone output: mute, -73..+6db (1db step) */
166static const DECLARE_TLV_DB_SCALE(db_scale_hp, -7400, 100, 1);
167/* DAC output: mute, -127..0db (0.5db step) */
168static const DECLARE_TLV_DB_SCALE(db_scale_dac, -12750, 50, 1);
169/* ADC gain: mute, -21..+24db (0.5db step) */
170static const DECLARE_TLV_DB_SCALE(db_scale_adc, -2100, 50, 1);
171
172static int maya_vol_info(struct snd_kcontrol *kcontrol,
173 struct snd_ctl_elem_info *uinfo)
174{
175 unsigned int idx = kcontrol->private_value;
176 struct maya_vol_info *vol = &vol_info[idx];
177
178 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
179 uinfo->count = 2;
180 uinfo->value.integer.min = 0;
181 uinfo->value.integer.max = vol->maxval;
182 return 0;
183}
184
185static int maya_vol_get(struct snd_kcontrol *kcontrol,
186 struct snd_ctl_elem_value *ucontrol)
187{
188 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
189 struct snd_wm8776 *wm =
190 &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)];
191 unsigned int idx = kcontrol->private_value;
192
193 mutex_lock(&chip->mutex);
194 ucontrol->value.integer.value[0] = wm->volumes[idx][0];
195 ucontrol->value.integer.value[1] = wm->volumes[idx][1];
196 mutex_unlock(&chip->mutex);
197 return 0;
198}
199
/*
 * .put callback: translate the 0..maxval control value into the codec's
 * attenuation encoding and write it per channel.  Value 0 means mute.
 * Returns 1 if any codec register changed, 0 otherwise.
 */
static int maya_vol_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
	/* control index selects which of the two WM8776 codecs */
	struct snd_wm8776 *wm =
		&chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)];
	unsigned int idx = kcontrol->private_value;
	struct maya_vol_info *vol = &vol_info[idx];
	unsigned int val, data;
	int ch, changed = 0;

	mutex_lock(&chip->mutex);
	for (ch = 0; ch < 2; ch++) {
		val = ucontrol->value.integer.value[ch];
		if (val > vol->maxval)
			val = vol->maxval;	/* clamp out-of-range input */
		if (val == wm->volumes[idx][ch])
			continue;		/* no change for this channel */
		if (!val)
			data = vol->mute;	/* 0 maps to the mute code */
		else
			data = (val - 1) + vol->offset;	/* shift into hw range */
		data |= vol->update;	/* set update / zero-cross bits */
		changed |= wm8776_write_bits(chip->ice, wm, vol->regs[ch],
					     vol->mask | vol->update, data);
		/* ADC volume also gates the mux bit to fully mute the input */
		if (vol->mux_bits[ch])
			wm8776_write_bits(chip->ice, wm, WM8776_REG_ADC_MUX,
					  vol->mux_bits[ch],
					  val ? 0 : vol->mux_bits[ch]);
		wm->volumes[idx][ch] = val;
	}
	mutex_unlock(&chip->mutex);
	return changed;
}
234
235/*
236 * WM8776 switch controls
237 */
238
239#define COMPOSE_SW_VAL(idx, reg, mask) ((idx) | ((reg) << 8) | ((mask) << 16))
240#define GET_SW_VAL_IDX(val) ((val) & 0xff)
241#define GET_SW_VAL_REG(val) (((val) >> 8) & 0xff)
242#define GET_SW_VAL_MASK(val) (((val) >> 16) & 0xff)
243
244#define maya_sw_info snd_ctl_boolean_mono_info
245
246static int maya_sw_get(struct snd_kcontrol *kcontrol,
247 struct snd_ctl_elem_value *ucontrol)
248{
249 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
250 struct snd_wm8776 *wm =
251 &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)];
252 unsigned int idx = GET_SW_VAL_IDX(kcontrol->private_value);
253
254 ucontrol->value.integer.value[0] = (wm->switch_bits >> idx) & 1;
255 return 0;
256}
257
258static int maya_sw_put(struct snd_kcontrol *kcontrol,
259 struct snd_ctl_elem_value *ucontrol)
260{
261 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
262 struct snd_wm8776 *wm =
263 &chip->wm[snd_ctl_get_ioff(kcontrol, &ucontrol->id)];
264 unsigned int idx = GET_SW_VAL_IDX(kcontrol->private_value);
265 unsigned int mask, val;
266 int changed;
267
268 mutex_lock(&chip->mutex);
269 mask = 1 << idx;
270 wm->switch_bits &= ~mask;
271 val = ucontrol->value.integer.value[0];
272 if (val)
273 wm->switch_bits |= mask;
274 mask = GET_SW_VAL_MASK(kcontrol->private_value);
275 changed = wm8776_write_bits(chip->ice, wm,
276 GET_SW_VAL_REG(kcontrol->private_value),
277 mask, val ? mask : 0);
278 mutex_unlock(&chip->mutex);
279 return changed;
280}
281
282/*
283 * GPIO pins (known ones for maya44)
284 */
285#define GPIO_PHANTOM_OFF 2
286#define GPIO_MIC_RELAY 4
287#define GPIO_SPDIF_IN_INV 5
288#define GPIO_MUST_BE_0 7
289
290/*
291 * GPIO switch controls
292 */
293
294#define COMPOSE_GPIO_VAL(shift, inv) ((shift) | ((inv) << 8))
295#define GET_GPIO_VAL_SHIFT(val) ((val) & 0xff)
296#define GET_GPIO_VAL_INV(val) (((val) >> 8) & 1)
297
/*
 * Set the GPIO bits under @mask to @bits; skip the write when the pins
 * are already in the requested state.  Returns 1 when a write occurred.
 */
static int maya_set_gpio_bits(struct snd_ice1712 *ice, unsigned int mask,
			      unsigned int bits)
{
	unsigned int cur = snd_ice1712_gpio_read(ice);

	if ((cur & mask) == bits)
		return 0;	/* nothing to do */
	snd_ice1712_gpio_write(ice, (cur & ~mask) | bits);
	return 1;
}
308
309#define maya_gpio_sw_info snd_ctl_boolean_mono_info
310
311static int maya_gpio_sw_get(struct snd_kcontrol *kcontrol,
312 struct snd_ctl_elem_value *ucontrol)
313{
314 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
315 unsigned int shift = GET_GPIO_VAL_SHIFT(kcontrol->private_value);
316 unsigned int val;
317
318 val = (snd_ice1712_gpio_read(chip->ice) >> shift) & 1;
319 if (GET_GPIO_VAL_INV(kcontrol->private_value))
320 val = !val;
321 ucontrol->value.integer.value[0] = val;
322 return 0;
323}
324
325static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol,
326 struct snd_ctl_elem_value *ucontrol)
327{
328 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
329 unsigned int shift = GET_GPIO_VAL_SHIFT(kcontrol->private_value);
330 unsigned int val, mask;
331 int changed;
332
333 mutex_lock(&chip->mutex);
334 mask = 1 << shift;
335 val = ucontrol->value.integer.value[0];
336 if (GET_GPIO_VAL_INV(kcontrol->private_value))
337 val = !val;
338 val = val ? mask : 0;
339 changed = maya_set_gpio_bits(chip->ice, mask, val);
340 mutex_unlock(&chip->mutex);
341 return changed;
342}
343
344/*
345 * capture source selection
346 */
347
/* known working input slots (0-4) */
#define MAYA_LINE_IN	1	/* in-2 */
#define MAYA_MIC_IN	4	/* in-5 */

/* route the given input slot (0-4) to the ADC mux of codec idx (0/1) */
static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
{
	/* the mux field is a 5-bit one-hot selector (mask 0x1f) */
	wm8776_write_bits(chip->ice, &chip->wm[idx], WM8776_REG_ADC_MUX,
			  0x1f, 1 << line);
}
357
358static int maya_rec_src_info(struct snd_kcontrol *kcontrol,
359 struct snd_ctl_elem_info *uinfo)
360{
361 static char *texts[] = { "Line", "Mic" };
362
363 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
364 uinfo->count = 1;
365 uinfo->value.enumerated.items = ARRAY_SIZE(texts);
366 if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
367 uinfo->value.enumerated.item =
368 uinfo->value.enumerated.items - 1;
369 strcpy(uinfo->value.enumerated.name,
370 texts[uinfo->value.enumerated.item]);
371 return 0;
372}
373
374static int maya_rec_src_get(struct snd_kcontrol *kcontrol,
375 struct snd_ctl_elem_value *ucontrol)
376{
377 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
378 int sel;
379
380 if (snd_ice1712_gpio_read(chip->ice) & (1 << GPIO_MIC_RELAY))
381 sel = 1;
382 else
383 sel = 0;
384 ucontrol->value.enumerated.item[0] = sel;
385 return 0;
386}
387
388static int maya_rec_src_put(struct snd_kcontrol *kcontrol,
389 struct snd_ctl_elem_value *ucontrol)
390{
391 struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
392 int sel = ucontrol->value.enumerated.item[0];
393 int changed;
394
395 mutex_lock(&chip->mutex);
396 changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY,
397 sel ? GPIO_MIC_RELAY : 0);
398 wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN);
399 mutex_unlock(&chip->mutex);
400 return changed;
401}
402
403/*
404 * Maya44 routing switch settings have different meanings than the standard
405 * ice1724 switches as defined in snd_vt1724_pro_route_info (ice1724.c).
406 */
407static int maya_pb_route_info(struct snd_kcontrol *kcontrol,
408 struct snd_ctl_elem_info *uinfo)
409{
410 static char *texts[] = {
411 "PCM Out", /* 0 */
412 "Input 1", "Input 2", "Input 3", "Input 4"
413 };
414
415 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
416 uinfo->count = 1;
417 uinfo->value.enumerated.items = ARRAY_SIZE(texts);
418 if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
419 uinfo->value.enumerated.item =
420 uinfo->value.enumerated.items - 1;
421 strcpy(uinfo->value.enumerated.name,
422 texts[uinfo->value.enumerated.item]);
423 return 0;
424}
425
/*
 * Map a playback-route control index (0-9) to its bit position inside
 * the VT1724 routing register; indices wrap modulo 10.
 */
static int maya_pb_route_shift(int idx)
{
	static const unsigned char route_shifts[10] =
		{ 8, 20, 0, 3, 11, 23, 14, 26, 17, 29 };

	return route_shifts[idx % 10];
}
432
/* read back the current route value of the idx-th playback channel */
static int maya_pb_route_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
	/* control index selects which channel's shift to use */
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	ucontrol->value.enumerated.item[0] =
		snd_ice1724_get_route_val(chip->ice, maya_pb_route_shift(idx));
	return 0;
}
442
/* store a new route value for the idx-th playback channel; the return
 * value of snd_ice1724_put_route_val() (changed flag / error) is passed
 * straight back to the control layer */
static int maya_pb_route_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_maya44 *chip = snd_kcontrol_chip(kcontrol);
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	return snd_ice1724_put_route_val(chip->ice,
					 ucontrol->value.enumerated.item[0],
					 maya_pb_route_shift(idx));
}
452
453
454/*
455 * controls to be added
456 */
457
/*
 * Mixer controls registered for the Maya44. Entries with .count = 2
 * instantiate one control per WM8776 codec; the callbacks pick the
 * codec via snd_ctl_get_ioff().
 */
static struct snd_kcontrol_new maya_controls[] __devinitdata = {
	{
		.name = "Crossmix Playback Volume",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		SNDRV_CTL_ELEM_ACCESS_TLV_READ,
		.info = maya_vol_info,
		.get = maya_vol_get,
		.put = maya_vol_put,
		.tlv = { .p = db_scale_hp },
		.private_value = WM_VOL_HP,
		.count = 2,
	},
	{
		.name = "PCM Playback Volume",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		SNDRV_CTL_ELEM_ACCESS_TLV_READ,
		.info = maya_vol_info,
		.get = maya_vol_get,
		.put = maya_vol_put,
		.tlv = { .p = db_scale_dac },
		.private_value = WM_VOL_DAC,
		.count = 2,
	},
	{
		.name = "Line Capture Volume",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		SNDRV_CTL_ELEM_ACCESS_TLV_READ,
		.info = maya_vol_info,
		.get = maya_vol_get,
		.put = maya_vol_put,
		.tlv = { .p = db_scale_adc },
		.private_value = WM_VOL_ADC,
		.count = 2,
	},
	{
		.name = "PCM Playback Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = maya_sw_info,
		.get = maya_sw_get,
		.put = maya_sw_put,
		.private_value = COMPOSE_SW_VAL(WM_SW_DAC,
						WM8776_REG_OUTPUT_MUX, 0x01),
		.count = 2,
	},
	{
		.name = "Bypass Playback Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = maya_sw_info,
		.get = maya_sw_get,
		.put = maya_sw_put,
		.private_value = COMPOSE_SW_VAL(WM_SW_BYPASS,
						WM8776_REG_OUTPUT_MUX, 0x04),
		.count = 2,
	},
	{
		.name = "Capture Source",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = maya_rec_src_info,
		.get = maya_rec_src_get,
		.put = maya_rec_src_put,
	},
	{
		/* GPIO_PHANTOM_OFF is active-low, hence inv = 1 */
		.name = "Mic Phantom Power Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = maya_gpio_sw_info,
		.get = maya_gpio_sw_get,
		.put = maya_gpio_sw_put,
		.private_value = COMPOSE_GPIO_VAL(GPIO_PHANTOM_OFF, 1),
	},
	{
		.name = "SPDIF Capture Switch",
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.info = maya_gpio_sw_info,
		.get = maya_gpio_sw_get,
		.put = maya_gpio_sw_put,
		.private_value = COMPOSE_GPIO_VAL(GPIO_SPDIF_IN_INV, 1),
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "H/W Playback Route",
		.info = maya_pb_route_info,
		.get = maya_pb_route_get,
		.put = maya_pb_route_put,
		.count = 4,	/* FIXME: do controls 5-9 have any meaning? */
	},
};
547
548static int __devinit maya44_add_controls(struct snd_ice1712 *ice)
549{
550 int err, i;
551
552 for (i = 0; i < ARRAY_SIZE(maya_controls); i++) {
553 err = snd_ctl_add(ice->card, snd_ctl_new1(&maya_controls[i],
554 ice->spec));
555 if (err < 0)
556 return err;
557 }
558 return 0;
559}
560
561
562/*
563 * initialize a wm8776 chip
564 */
565static void __devinit wm8776_init(struct snd_ice1712 *ice,
566 struct snd_wm8776 *wm, unsigned int addr)
567{
568 static const unsigned short inits_wm8776[] = {
569 0x02, 0x100, /* R2: headphone L+R muted + update */
570 0x05, 0x100, /* R5: DAC output L+R muted + update */
571 0x06, 0x000, /* R6: DAC output phase normal */
572 0x07, 0x091, /* R7: DAC enable zero cross detection,
573 normal output */
574 0x08, 0x000, /* R8: DAC soft mute off */
575 0x09, 0x000, /* R9: no deemph, DAC zero detect disabled */
576 0x0a, 0x022, /* R10: DAC I2C mode, std polarities, 24bit */
577 0x0b, 0x022, /* R11: ADC I2C mode, std polarities, 24bit,
578 highpass filter enabled */
579 0x0c, 0x042, /* R12: ADC+DAC slave, ADC+DAC 44,1kHz */
580 0x0d, 0x000, /* R13: all power up */
581 0x0e, 0x100, /* R14: ADC left muted,
582 enable zero cross detection */
583 0x0f, 0x100, /* R15: ADC right muted,
584 enable zero cross detection */
585 /* R16: ALC...*/
586 0x11, 0x000, /* R17: disable ALC */
587 /* R18: ALC...*/
588 /* R19: noise gate...*/
589 0x15, 0x000, /* R21: ADC input mux init, mute all inputs */
590 0x16, 0x001, /* R22: output mux, select DAC */
591 0xff, 0xff
592 };
593
594 const unsigned short *ptr;
595 unsigned char reg;
596 unsigned short data;
597
598 wm->addr = addr;
599 /* enable DAC output; mute bypass, aux & all inputs */
600 wm->switch_bits = (1 << WM_SW_DAC);
601
602 ptr = inits_wm8776;
603 while (*ptr != 0xff) {
604 reg = *ptr++;
605 data = *ptr++;
606 wm8776_write(ice, wm, reg, data);
607 }
608}
609
610
611/*
612 * change the rate on the WM8776 codecs.
613 * this assumes that the VT17xx's rate is changed by the calling function.
614 * NOTE: even though the WM8776's are running in slave mode and rate
615 * selection is automatic, we need to call snd_wm8776_set_rate() here
616 * to make sure some flags are set correctly.
617 */
618static void set_rate(struct snd_ice1712 *ice, unsigned int rate)
619{
620 struct snd_maya44 *chip = ice->spec;
621 unsigned int ratio, adc_ratio, val;
622 int i;
623
624 switch (rate) {
625 case 192000:
626 ratio = WM8776_CLOCK_RATIO_128FS;
627 break;
628 case 176400:
629 ratio = WM8776_CLOCK_RATIO_128FS;
630 break;
631 case 96000:
632 ratio = WM8776_CLOCK_RATIO_256FS;
633 break;
634 case 88200:
635 ratio = WM8776_CLOCK_RATIO_384FS;
636 break;
637 case 48000:
638 ratio = WM8776_CLOCK_RATIO_512FS;
639 break;
640 case 44100:
641 ratio = WM8776_CLOCK_RATIO_512FS;
642 break;
643 case 32000:
644 ratio = WM8776_CLOCK_RATIO_768FS;
645 break;
646 case 0:
647 /* no hint - S/PDIF input is master, simply return */
648 return;
649 default:
650 snd_BUG();
651 return;
652 }
653
654 /*
655 * this currently sets the same rate for ADC and DAC, but limits
656 * ADC rate to 256X (96kHz). For 256X mode (96kHz), this sets ADC
657 * oversampling to 64x, as recommended by WM8776 datasheet.
658 * Setting the rate is not really necessary in slave mode.
659 */
660 adc_ratio = ratio;
661 if (adc_ratio < WM8776_CLOCK_RATIO_256FS)
662 adc_ratio = WM8776_CLOCK_RATIO_256FS;
663
664 val = adc_ratio;
665 if (adc_ratio == WM8776_CLOCK_RATIO_256FS)
666 val |= 8;
667 val |= ratio << 4;
668
669 mutex_lock(&chip->mutex);
670 for (i = 0; i < 2; i++)
671 wm8776_write_bits(ice, &chip->wm[i],
672 WM8776_REG_MASTER_MODE_CONTROL,
673 0x180, val);
674 mutex_unlock(&chip->mutex);
675}
676
677/*
678 * supported sample rates (to override the default one)
679 */
680
/* NOTE(review): every rate listed here must have a matching case in
 * set_rate(), otherwise snd_BUG() triggers at runtime — keep the two
 * in sync */
static unsigned int rates[] = {
	32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000
};

/* playback rates: 32..192 kHz */
static struct snd_pcm_hw_constraint_list dac_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0
};
691
692
693/*
694 * chip addresses on I2C bus
695 */
/* I2C bus addresses of the two WM8776 codecs */
static unsigned char wm8776_addr[2] __devinitdata = {
	0x34, 0x36, /* codec 0 & 1 */
};
699
700/*
701 * initialize the chip
702 */
703static int __devinit maya44_init(struct snd_ice1712 *ice)
704{
705 int i;
706 struct snd_maya44 *chip;
707
708 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
709 if (!chip)
710 return -ENOMEM;
711 mutex_init(&chip->mutex);
712 chip->ice = ice;
713 ice->spec = chip;
714
715 /* initialise codecs */
716 ice->num_total_dacs = 4;
717 ice->num_total_adcs = 4;
718 ice->akm_codecs = 0;
719
720 for (i = 0; i < 2; i++) {
721 wm8776_init(ice, &chip->wm[i], wm8776_addr[i]);
722 wm8776_select_input(chip, i, MAYA_LINE_IN);
723 }
724
725 /* set card specific rates */
726 ice->hw_rates = &dac_rates;
727
728 /* register change rate notifier */
729 ice->gpio.set_pro_rate = set_rate;
730
731 /* RDMA1 (2nd input channel) is used for ADC by default */
732 ice->force_rdma1 = 1;
733
734 /* have an own routing control */
735 ice->own_routing = 1;
736
737 return 0;
738}
739
740
741/*
742 * Maya44 boards don't provide the EEPROM data except for the vendor IDs.
 * hence the driver needs to set it up properly.
744 */
745
/* synthetic EEPROM image handed to the ice1724 core, since the board's
 * real EEPROM only carries the vendor IDs */
static unsigned char maya44_eeprom[] __devinitdata = {
	[ICE_EEP2_SYSCONF]     = 0x45,
		/* clock xin1=49.152MHz, mpu401, 2 stereo ADCs+DACs */
	[ICE_EEP2_ACLINK]      = 0x80,
		/* I2S */
	[ICE_EEP2_I2S]         = 0xf8,
		/* vol, 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF]       = 0xc3,
		/* enable spdif out, spdif out supp, spdif-in, ext spdif out */
	[ICE_EEP2_GPIO_DIR]    = 0xff,
	[ICE_EEP2_GPIO_DIR1]   = 0xff,
	[ICE_EEP2_GPIO_DIR2]   = 0xff,
	[ICE_EEP2_GPIO_MASK]   = 0/*0x9f*/,
	[ICE_EEP2_GPIO_MASK1]  = 0/*0xff*/,
	[ICE_EEP2_GPIO_MASK2]  = 0/*0x7f*/,
	/* power-on GPIO state: phantom power off, S/PDIF-in non-inverted */
	[ICE_EEP2_GPIO_STATE]  = (1 << GPIO_PHANTOM_OFF) |
			(1 << GPIO_SPDIF_IN_INV),
	[ICE_EEP2_GPIO_STATE1] = 0x00,
	[ICE_EEP2_GPIO_STATE2] = 0x00,
};
766
767/* entry point */
/* entry point: card descriptor table picked up by the ice1724 driver,
 * terminated by an empty entry */
struct snd_ice1712_card_info snd_vt1724_maya44_cards[] __devinitdata = {
	{
		.subvendor = VT1724_SUBDEVICE_MAYA44,
		.name = "ESI Maya44",
		.model = "maya44",
		.chip_init = maya44_init,
		.build_controls = maya44_add_controls,
		.eeprom_size = sizeof(maya44_eeprom),
		.eeprom_data = maya44_eeprom,
	},
	{ } /* terminator */
};
diff --git a/sound/pci/ice1712/maya44.h b/sound/pci/ice1712/maya44.h
new file mode 100644
index 000000000000..eafd03a8f4b5
--- /dev/null
+++ b/sound/pci/ice1712/maya44.h
@@ -0,0 +1,10 @@
#ifndef __SOUND_MAYA44_H
#define __SOUND_MAYA44_H

/* device description fragment appended to the ice1724 supported-card list */
#define MAYA44_DEVICE_DESC		"{ESI,Maya44},"

/* EEPROM subvendor ID that identifies a Maya44 board */
#define VT1724_SUBDEVICE_MAYA44		0x34315441	/* Maya44 */

/* card table defined in maya44.c, registered by the ice1724 core */
extern struct snd_ice1712_card_info snd_vt1724_maya44_cards[];

#endif	/* __SOUND_MAYA44_H */
diff --git a/sound/pci/lx6464es/Makefile b/sound/pci/lx6464es/Makefile
new file mode 100644
index 000000000000..eb04a6c73d8b
--- /dev/null
+++ b/sound/pci/lx6464es/Makefile
@@ -0,0 +1,2 @@
1snd-lx6464es-objs := lx6464es.o lx_core.o
2obj-$(CONFIG_SND_LX6464ES) += snd-lx6464es.o
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
new file mode 100644
index 000000000000..ccf1b38c88ea
--- /dev/null
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -0,0 +1,1159 @@
1/* -*- linux-c -*- *
2 *
3 * ALSA driver for the digigram lx6464es interface
4 *
5 * Copyright (c) 2008, 2009 Tim Blechmann <tim@klingt.org>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
22 *
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/delay.h>
29
30#include <sound/initval.h>
31#include <sound/control.h>
32#include <sound/info.h>
33
34#include "lx6464es.h"
35
36MODULE_AUTHOR("Tim Blechmann");
37MODULE_LICENSE("GPL");
38MODULE_DESCRIPTION("digigram lx6464es");
39MODULE_SUPPORTED_DEVICE("{digigram lx6464es{}}");
40
41
42static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
43static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
44static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
45
46module_param_array(index, int, NULL, 0444);
47MODULE_PARM_DESC(index, "Index value for Digigram LX6464ES interface.");
48module_param_array(id, charp, NULL, 0444);
49MODULE_PARM_DESC(id, "ID string for Digigram LX6464ES interface.");
50module_param_array(enable, bool, NULL, 0444);
51MODULE_PARM_DESC(enable, "Enable/disable specific Digigram LX6464ES soundcards.");
52
53static const char card_name[] = "LX6464ES";
54
55
56#define PCI_DEVICE_ID_PLX_LX6464ES PCI_DEVICE_ID_PLX_9056
57
58static struct pci_device_id snd_lx6464es_ids[] = {
59 { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
60 .subvendor = PCI_VENDOR_ID_DIGIGRAM,
61 .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM
62 }, /* LX6464ES */
63 { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
64 .subvendor = PCI_VENDOR_ID_DIGIGRAM,
65 .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM
66 }, /* LX6464ES-CAE */
67 { 0, },
68};
69
70MODULE_DEVICE_TABLE(pci, snd_lx6464es_ids);
71
72
73
/* PGO bit for USER0 in PCI register 0x06 / local register 0xEC */
75#define CHIPSC_RESET_XILINX (1L<<16)
76
77
78/* alsa callbacks */
/* alsa callbacks */
/* hardware capabilities, shared by playback and capture: interleaved
 * 16/24-bit PCM, 2-64 channels, 8-192 kHz; buffer/period limits are
 * derived from the MicroBlaze DSP's internal block length (IBL) */
static struct snd_pcm_hardware lx_caps = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_SYNC_START),
	.formats	= (SNDRV_PCM_FMTBIT_S16_LE |
			   SNDRV_PCM_FMTBIT_S16_BE |
			   SNDRV_PCM_FMTBIT_S24_3LE |
			   SNDRV_PCM_FMTBIT_S24_3BE),
	.rates		= (SNDRV_PCM_RATE_CONTINUOUS |
			   SNDRV_PCM_RATE_8000_192000),
	.rate_min	= 8000,
	.rate_max	= 192000,
	.channels_min	= 2,
	.channels_max	= 64,
	.buffer_bytes_max = 64*2*3*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER,
	.period_bytes_min = (2*2*MICROBLAZE_IBL_MIN*2),
	.period_bytes_max = (4*64*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER),
	.periods_min	= 2,
	.periods_max	= MAX_STREAM_BUFFER,
};
100
101static int lx_set_granularity(struct lx6464es *chip, u32 gran);
102
103
/* allocate the DSP pipe for this substream and program its granularity
 * from the period size; returns 0 or a negative error code */
static int lx_hardware_open(struct lx6464es *chip,
			    struct snd_pcm_substream *substream)
{
	int err = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_pcm_uframes_t period_size = runtime->period_size;

	snd_printd(LXP "allocating pipe for %d channels\n", channels);
	err = lx_pipe_allocate(chip, 0, is_capture, channels);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "allocating pipe failed\n");
		return err;
	}

	/* the DSP interrupt granularity must match the ALSA period */
	err = lx_set_granularity(chip, period_size);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "setting granularity to %ld failed\n",
			   period_size);
		return err;
	}

	return 0;
}
130
/* configure the stream format on pipe 0 and start it, waiting until the
 * DSP reports the pipe as running; returns 0 or a negative error code */
static int lx_hardware_start(struct lx6464es *chip,
			     struct snd_pcm_substream *substream)
{
	int err = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_printd(LXP "setting stream format\n");
	err = lx_stream_set_format(chip, runtime, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "setting stream format failed\n");
		return err;
	}

	snd_printd(LXP "starting pipe\n");
	err = lx_pipe_start(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "starting pipe failed\n");
		return err;
	}

	/* the start command is asynchronous; block until it took effect */
	snd_printd(LXP "waiting for pipe to start\n");
	err = lx_pipe_wait_for_start(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "waiting for pipe failed\n");
		return err;
	}

	return err;
}
161
162
163static int lx_hardware_stop(struct lx6464es *chip,
164 struct snd_pcm_substream *substream)
165{
166 int err = 0;
167 int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
168
169 snd_printd(LXP "pausing pipe\n");
170 err = lx_pipe_pause(chip, 0, is_capture);
171 if (err < 0) {
172 snd_printk(KERN_ERR LXP "pausing pipe failed\n");
173 return err;
174 }
175
176 snd_printd(LXP "waiting for pipe to become idle\n");
177 err = lx_pipe_wait_for_idle(chip, 0, is_capture);
178 if (err < 0) {
179 snd_printk(KERN_ERR LXP "waiting for pipe failed\n");
180 return err;
181 }
182
183 snd_printd(LXP "stopping pipe\n");
184 err = lx_pipe_stop(chip, 0, is_capture);
185 if (err < 0) {
186 snd_printk(LXP "stopping pipe failed\n");
187 return err;
188 }
189
190 return err;
191}
192
193
194static int lx_hardware_close(struct lx6464es *chip,
195 struct snd_pcm_substream *substream)
196{
197 int err = 0;
198 int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
199
200 snd_printd(LXP "releasing pipe\n");
201 err = lx_pipe_release(chip, 0, is_capture);
202 if (err < 0) {
203 snd_printk(LXP "releasing pipe failed\n");
204 return err;
205 }
206
207 return err;
208}
209
210
/*
 * PCM open callback: install the hardware caps, pin the rate to the
 * board's current clock (it cannot be changed per stream), and
 * constrain the period size to the DSP's IBL range.
 */
static int lx_pcm_open(struct snd_pcm_substream *substream)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = 0;
	int board_rate;

	snd_printdd("->lx_pcm_open\n");
	mutex_lock(&chip->setup_mutex);

	/* copy the struct snd_pcm_hardware struct */
	runtime->hw = lx_caps;

#if 0
	/* buffer-size should better be multiple of period-size */
	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not constrain periods\n");
		goto exit;
	}
#endif

	/* the clock rate cannot be changed */
	board_rate = chip->board_sample_rate;
	/* NOTE(review): this warning text says "periods" but the call
	 * constrains the rate */
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
					   board_rate, board_rate);

	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not constrain periods\n");
		goto exit;
	}

	/* constrain period size */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					   MICROBLAZE_IBL_MIN,
					   MICROBLAZE_IBL_MAX);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP
			   "could not constrain period size\n");
		goto exit;
	}

	/* keep the buffer size a multiple of 32 frames */
	snd_pcm_hw_constraint_step(runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);

	snd_pcm_set_sync(substream);
	err = 0;

exit:
	/* private_data is set even when a constraint failed; nothing has
	 * been allocated at this point */
	runtime->private_data = chip;

	mutex_unlock(&chip->setup_mutex);
	snd_printdd("<-lx_pcm_open, %d\n", err);
	return err;
}
268
/* PCM close callback: nothing to tear down here — the pipe is released
 * by hw_free/prepare — so just trace and succeed */
static int lx_pcm_close(struct snd_pcm_substream *substream)
{
	snd_printdd("->lx_pcm_close\n");
	return 0;
}
275
276static snd_pcm_uframes_t lx_pcm_stream_pointer(struct snd_pcm_substream
277 *substream)
278{
279 struct lx6464es *chip = snd_pcm_substream_chip(substream);
280 snd_pcm_uframes_t pos;
281 unsigned long flags;
282 int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
283
284 struct lx_stream *lx_stream = is_capture ? &chip->capture_stream :
285 &chip->playback_stream;
286
287 snd_printdd("->lx_pcm_stream_pointer\n");
288
289 spin_lock_irqsave(&chip->lock, flags);
290 pos = lx_stream->frame_pos * substream->runtime->period_size;
291 spin_unlock_irqrestore(&chip->lock, flags);
292
293 snd_printdd(LXP "stream_pointer at %ld\n", pos);
294 return pos;
295}
296
/*
 * PCM prepare callback: tear down a previously running pipe (prepare
 * may be called repeatedly), then open and start the hardware for the
 * new stream parameters.
 */
static int lx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	int err = 0;
	const int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_printdd("->lx_pcm_prepare\n");

	mutex_lock(&chip->setup_mutex);

	/* re-prepare: stop and close the old pipe first */
	if (chip->hardware_running[is_capture]) {
		err = lx_hardware_stop(chip, substream);
		if (err < 0) {
			snd_printk(KERN_ERR LXP "failed to stop hardware. "
				   "Error code %d\n", err);
			goto exit;
		}

		err = lx_hardware_close(chip, substream);
		if (err < 0) {
			snd_printk(KERN_ERR LXP "failed to close hardware. "
				   "Error code %d\n", err);
			goto exit;
		}
	}

	snd_printd(LXP "opening hardware\n");
	err = lx_hardware_open(chip, substream);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "failed to open hardware. "
			   "Error code %d\n", err);
		goto exit;
	}

	err = lx_hardware_start(chip, substream);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "failed to start hardware. "
			   "Error code %d\n", err);
		goto exit;
	}

	chip->hardware_running[is_capture] = 1;

	/* remember the rate now driving the board clock (err is 0 here
	 * since the error paths above all jump to exit) */
	if (chip->board_sample_rate != substream->runtime->rate) {
		if (!err)
			chip->board_sample_rate = substream->runtime->rate;
	}

exit:
	mutex_unlock(&chip->setup_mutex);
	return err;
}
349
350static int lx_pcm_hw_params(struct snd_pcm_substream *substream,
351 struct snd_pcm_hw_params *hw_params, int is_capture)
352{
353 struct lx6464es *chip = snd_pcm_substream_chip(substream);
354 int err = 0;
355
356 snd_printdd("->lx_pcm_hw_params\n");
357
358 mutex_lock(&chip->setup_mutex);
359
360 /* set dma buffer */
361 err = snd_pcm_lib_malloc_pages(substream,
362 params_buffer_bytes(hw_params));
363
364 if (is_capture)
365 chip->capture_stream.stream = substream;
366 else
367 chip->playback_stream.stream = substream;
368
369 mutex_unlock(&chip->setup_mutex);
370 return err;
371}
372
/* hw_params callback for the playback substream */
static int lx_pcm_hw_params_playback(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return lx_pcm_hw_params(substream, hw_params, 0);
}
378
/* hw_params callback for the capture substream */
static int lx_pcm_hw_params_capture(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return lx_pcm_hw_params(substream, hw_params, 1);
}
384
385static int lx_pcm_hw_free(struct snd_pcm_substream *substream)
386{
387 struct lx6464es *chip = snd_pcm_substream_chip(substream);
388 int err = 0;
389 int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
390
391 snd_printdd("->lx_pcm_hw_free\n");
392 mutex_lock(&chip->setup_mutex);
393
394 if (chip->hardware_running[is_capture]) {
395 err = lx_hardware_stop(chip, substream);
396 if (err < 0) {
397 snd_printk(KERN_ERR LXP "failed to stop hardware. "
398 "Error code %d\n", err);
399 goto exit;
400 }
401
402 err = lx_hardware_close(chip, substream);
403 if (err < 0) {
404 snd_printk(KERN_ERR LXP "failed to close hardware. "
405 "Error code %d\n", err);
406 goto exit;
407 }
408
409 chip->hardware_running[is_capture] = 0;
410 }
411
412 err = snd_pcm_lib_free_pages(substream);
413
414 if (is_capture)
415 chip->capture_stream.stream = 0;
416 else
417 chip->playback_stream.stream = 0;
418
419exit:
420 mutex_unlock(&chip->setup_mutex);
421 return err;
422}
423
/*
 * Start a scheduled stream from tasklet context: queue one DSP buffer
 * per ALSA period (the frames are 24-bit packed, 3 bytes per channel),
 * then issue the stream-start command.
 */
static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
{
	struct snd_pcm_substream *substream = lx_stream->stream;
	const int is_capture = lx_stream->is_capture;

	int err;

	const u32 channels = substream->runtime->channels;
	const u32 bytes_per_frame = channels * 3;
	const u32 period_size = substream->runtime->period_size;
	const u32 periods = substream->runtime->periods;
	const u32 period_bytes = period_size * bytes_per_frame;

	dma_addr_t buf = substream->dma_buffer.addr;
	int i;

	u32 needed, freed;
	u32 size_array[5];

	/* hand one buffer per period to the DSP; NOTE(review): err from
	 * lx_buffer_ask/lx_buffer_give is not checked here */
	for (i = 0; i != periods; ++i) {
		u32 buffer_index = 0;

		err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed,
				    size_array);
		snd_printdd(LXP "starting: needed %d, freed %d\n",
			    needed, freed);

		err = lx_buffer_give(chip, 0, is_capture, period_bytes,
				     lower_32_bits(buf), upper_32_bits(buf),
				     &buffer_index);

		snd_printdd(LXP "starting: buffer index %x on %p (%d bytes)\n",
			    buffer_index, (void *)buf, period_bytes);
		buf += period_bytes;
	}

	err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
	snd_printdd(LXP "starting: needed %d, freed %d\n", needed, freed);

	snd_printd(LXP "starting: starting stream\n");
	err = lx_stream_start(chip, 0, is_capture);
	if (err < 0)
		snd_printk(KERN_ERR LXP "couldn't start stream\n");
	else
		lx_stream->status = LX_STREAM_STATUS_RUNNING;

	/* restart position accounting for the new run */
	lx_stream->frame_pos = 0;
}
472
473static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
474{
475 const int is_capture = lx_stream->is_capture;
476 int err;
477
478 snd_printd(LXP "stopping: stopping stream\n");
479 err = lx_stream_stop(chip, 0, is_capture);
480 if (err < 0)
481 snd_printk(KERN_ERR LXP "couldn't stop stream\n");
482 else
483 lx_stream->status = LX_STREAM_STATUS_FREE;
484
485}
486
487static void lx_trigger_tasklet_dispatch_stream(struct lx6464es *chip,
488 struct lx_stream *lx_stream)
489{
490 switch (lx_stream->status) {
491 case LX_STREAM_STATUS_SCHEDULE_RUN:
492 lx_trigger_start(chip, lx_stream);
493 break;
494
495 case LX_STREAM_STATUS_SCHEDULE_STOP:
496 lx_trigger_stop(chip, lx_stream);
497 break;
498
499 default:
500 break;
501 }
502}
503
/* tasklet deferred from the trigger callback: performs the actual
 * start/stop commands (which may sleep-free busy-wait) for both
 * directions under the chip spinlock */
static void lx_trigger_tasklet(unsigned long data)
{
	struct lx6464es *chip = (struct lx6464es *)data;
	unsigned long flags;

	snd_printdd("->lx_trigger_tasklet\n");

	spin_lock_irqsave(&chip->lock, flags);
	lx_trigger_tasklet_dispatch_stream(chip, &chip->capture_stream);
	lx_trigger_tasklet_dispatch_stream(chip, &chip->playback_stream);
	spin_unlock_irqrestore(&chip->lock, flags);
}
516
517static int lx_pcm_trigger_dispatch(struct lx6464es *chip,
518 struct lx_stream *lx_stream, int cmd)
519{
520 int err = 0;
521
522 switch (cmd) {
523 case SNDRV_PCM_TRIGGER_START:
524 lx_stream->status = LX_STREAM_STATUS_SCHEDULE_RUN;
525 break;
526
527 case SNDRV_PCM_TRIGGER_STOP:
528 lx_stream->status = LX_STREAM_STATUS_SCHEDULE_STOP;
529 break;
530
531 default:
532 err = -EINVAL;
533 goto exit;
534 }
535 tasklet_schedule(&chip->trigger_tasklet);
536
537exit:
538 return err;
539}
540
541
542static int lx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
543{
544 struct lx6464es *chip = snd_pcm_substream_chip(substream);
545 const int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
546 struct lx_stream *stream = is_capture ? &chip->capture_stream :
547 &chip->playback_stream;
548
549 snd_printdd("->lx_pcm_trigger\n");
550
551 return lx_pcm_trigger_dispatch(chip, stream, cmd);
552}
553
/* final teardown: disable interrupts before freeing the IRQ, unmap the
 * BARs, then release the PCI resources and the chip structure */
static int snd_lx6464es_free(struct lx6464es *chip)
{
	snd_printdd("->snd_lx6464es_free\n");

	lx_irq_disable(chip);

	/* chip->irq is -1 when the IRQ was never requested */
	if (chip->irq >= 0)
		free_irq(chip->irq, chip);

	iounmap(chip->port_dsp_bar);
	ioport_unmap(chip->port_plx_remapped);

	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);

	kfree(chip);

	return 0;
}
573
/* snd_device dtor hook: forwards to snd_lx6464es_free() */
static int snd_lx6464es_dev_free(struct snd_device *device)
{
	return snd_lx6464es_free(device->device_data);
}
578
579/* reset the dsp during initialization */
/* reset the dsp during initialization */
static int __devinit lx_init_xilinx_reset(struct lx6464es *chip)
{
	int i;
	u32 plx_reg = lx_plx_reg_read(chip, ePLX_CHIPSC);

	snd_printdd("->lx_init_xilinx_reset\n");

	/* activate reset of xilinx */
	plx_reg &= ~CHIPSC_RESET_XILINX;

	lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);
	msleep(1);

	lx_plx_reg_write(chip, ePLX_MBOX3, 0);
	msleep(1);

	plx_reg |= CHIPSC_RESET_XILINX;
	lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);

	/* deactivate reset of xilinx */
	/* poll MBOX3 (up to 100 x 10ms) until the xilinx signals that it
	 * came out of reset by writing a non-zero value there */
	for (i = 0; i != 100; ++i) {
		u32 reg_mbox3;
		msleep(10);
		reg_mbox3 = lx_plx_reg_read(chip, ePLX_MBOX3);
		if (reg_mbox3) {
			snd_printd(LXP "xilinx reset done\n");
			snd_printdd(LXP "xilinx took %d loops\n", i);
			break;
		}
	}

	/* todo: add some error handling? */

	/* clear mr */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	/* the ES xilinx may not be ready yet — wait for it */
	msleep(600);

	return 0;
}
621
/* verify that the Xilinx/MicroBlaze register window is reachable; as a
 * fallback, re-enable the PCI space remap and try once more */
static int __devinit lx_init_xilinx_test(struct lx6464es *chip)
{
	u32 reg;

	snd_printdd("->lx_init_xilinx_test\n");

	/* TEST if we have access to Xilinx/MicroBlaze */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	/* a working window reads back the 0 we just wrote */
	reg = lx_dsp_reg_read(chip, eReg_CSM);

	if (reg) {
		snd_printk(KERN_ERR LXP "Problem: Reg_CSM %x.\n", reg);

		/* PCI9056_SPACE0_REMAP */
		lx_plx_reg_write(chip, ePLX_PCICR, 1);

		reg = lx_dsp_reg_read(chip, eReg_CSM);
		if (reg) {
			snd_printk(KERN_ERR LXP "Error: Reg_CSM %x.\n", reg);
			return -EAGAIN; /* seems to be appropriate */
		}
	}

	snd_printd(LXP "Xilinx/MicroBlaze access test successful\n");

	return 0;
}
650
/* initialize ethersound */
static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
{
	int i;
	u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);

	/* 64 outputs, 64 inputs, single-speed frequency ratio */
	u32 default_conf_es = (64 << IOCR_OUTPUTS_OFFSET) |
		(64 << IOCR_INPUTS_OFFSET) |
		(FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);

	/* keep the read-only part of the register, replace the writable part */
	u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK)
		| (default_conf_es & CONFES_WRITE_PART_MASK);

	snd_printdd("->lx_init_ethersound\n");

	chip->freq_ratio = FREQ_RATIO_SINGLE_MODE;

	/*
	 * write it to the card !
	 * this actually kicks the ES xilinx, the first time since poweron.
	 * the MAC address in the Reg_ADMACESMSB Reg_ADMACESLSB registers
	 * is not ready before this is done, and the bit 2 in Reg_CSES is set.
	 * */
	lx_dsp_reg_write(chip, eReg_CONFES, conf_es);

	/* wait up to 1000 * 1ms for bit 2 of Reg_CSES to signal readiness */
	for (i = 0; i != 1000; ++i) {
		if (lx_dsp_reg_read(chip, eReg_CSES) & 4) {
			snd_printd(LXP "ethersound initialized after %dms\n",
				   i);
			goto ethersound_initialized;
		}
		msleep(1);
	}
	snd_printk(KERN_WARNING LXP
		   "ethersound could not be initialized after %dms\n", i);
	return -ETIMEDOUT;

 ethersound_initialized:
	snd_printd(LXP "ethersound initialized\n");
	return 0;
}
692
693static int __devinit lx_init_get_version_features(struct lx6464es *chip)
694{
695 u32 dsp_version;
696
697 int err;
698
699 snd_printdd("->lx_init_get_version_features\n");
700
701 err = lx_dsp_get_version(chip, &dsp_version);
702
703 if (err == 0) {
704 u32 freq;
705
706 snd_printk(LXP "DSP version: V%02d.%02d #%d\n",
707 (dsp_version>>16) & 0xff, (dsp_version>>8) & 0xff,
708 dsp_version & 0xff);
709
710 /* later: what firmware version do we expect? */
711
712 /* retrieve Play/Rec features */
713 /* done here because we may have to handle alternate
714 * DSP files. */
715 /* later */
716
717 /* init the EtherSound sample rate */
718 err = lx_dsp_get_clock_frequency(chip, &freq);
719 if (err == 0)
720 chip->board_sample_rate = freq;
721 snd_printd(LXP "actual clock frequency %d\n", freq);
722 } else {
723 snd_printk(KERN_ERR LXP "DSP corrupted \n");
724 err = -EAGAIN;
725 }
726
727 return err;
728}
729
/* Set the board's period size ("granularity") in frames.  The requested
 * value is snapped up to the next power of two within
 * [MICROBLAZE_IBL_MIN, MICROBLAZE_IBL_MAX] before being sent to the DSP.
 * Returns 0 on success or if the board already uses that granularity,
 * -EAGAIN if the DSP rejects the command. */
static int lx_set_granularity(struct lx6464es *chip, u32 gran)
{
	int err = 0;
	u32 snapped_gran = MICROBLAZE_IBL_MIN;

	snd_printdd("->lx_set_granularity\n");

	/* blocksize is a power of 2 */
	while ((snapped_gran < gran) &&
	       (snapped_gran < MICROBLAZE_IBL_MAX)) {
		snapped_gran *= 2;
	}

	/* already configured: nothing to send to the board */
	if (snapped_gran == chip->pcm_granularity)
		return 0;

	err = lx_dsp_set_granularity(chip, snapped_gran);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not set granularity\n");
		err = -EAGAIN;
	}

	if (snapped_gran != gran)
		snd_printk(LXP "snapped blocksize to %d\n", snapped_gran);

	snd_printd(LXP "set blocksize on board %d\n", snapped_gran);
	chip->pcm_granularity = snapped_gran;

	return err;
}
760
/* initialize and test the xilinx dsp chip */
static int __devinit lx_init_dsp(struct lx6464es *chip)
{
	int err;
	u8 mac_address[6];
	int i;

	snd_printdd("->lx_init_dsp\n");

	snd_printd(LXP "initialize board\n");
	err = lx_init_xilinx_reset(chip);
	if (err)
		return err;

	snd_printd(LXP "testing board\n");
	err = lx_init_xilinx_test(chip);
	if (err)
		return err;

	snd_printd(LXP "initialize ethersound configuration\n");
	err = lx_init_ethersound_config(chip);
	if (err)
		return err;

	lx_irq_enable(chip);

	/** \todo the mac address should be ready by now, but it isn't,
	 * so we wait for it */
	for (i = 0; i != 1000; ++i) {
		err = lx_dsp_get_mac(chip, mac_address);
		if (err)
			return err;
		/* an all-zero MAC means the xilinx hasn't published it yet */
		if (mac_address[0] || mac_address[1] || mac_address[2] ||
		    mac_address[3] || mac_address[4] || mac_address[5])
			goto mac_ready;
		msleep(1);
	}
	return -ETIMEDOUT;

mac_ready:
	snd_printd(LXP "mac address ready read after: %dms\n", i);
	snd_printk(LXP "mac address: %02X.%02X.%02X.%02X.%02X.%02X\n",
		   mac_address[0], mac_address[1], mac_address[2],
		   mac_address[3], mac_address[4], mac_address[5]);

	err = lx_init_get_version_features(chip);
	if (err)
		return err;

	/* return value deliberately ignored: a granularity failure is
	 * not fatal for board bring-up */
	lx_set_granularity(chip, MICROBLAZE_IBL_DEFAULT);

	chip->playback_mute = 0;

	return err;
}
816
/* ALSA PCM callbacks for the playback substream */
static struct snd_pcm_ops lx_ops_playback = {
	.open      = lx_pcm_open,
	.close     = lx_pcm_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.prepare   = lx_pcm_prepare,
	.hw_params = lx_pcm_hw_params_playback,
	.hw_free   = lx_pcm_hw_free,
	.trigger   = lx_pcm_trigger,
	.pointer   = lx_pcm_stream_pointer,
};
827
/* ALSA PCM callbacks for the capture substream; identical to playback
 * except for the direction-specific hw_params handler */
static struct snd_pcm_ops lx_ops_capture = {
	.open      = lx_pcm_open,
	.close     = lx_pcm_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.prepare   = lx_pcm_prepare,
	.hw_params = lx_pcm_hw_params_capture,
	.hw_free   = lx_pcm_hw_free,
	.trigger   = lx_pcm_trigger,
	.pointer   = lx_pcm_stream_pointer,
};
838
839static int __devinit lx_pcm_create(struct lx6464es *chip)
840{
841 int err;
842 struct snd_pcm *pcm;
843
844 u32 size = 64 * /* channels */
845 3 * /* 24 bit samples */
846 MAX_STREAM_BUFFER * /* periods */
847 MICROBLAZE_IBL_MAX * /* frames per period */
848 2; /* duplex */
849
850 size = PAGE_ALIGN(size);
851
852 /* hardcoded device name & channel count */
853 err = snd_pcm_new(chip->card, (char *)card_name, 0,
854 1, 1, &pcm);
855
856 pcm->private_data = chip;
857
858 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &lx_ops_playback);
859 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &lx_ops_capture);
860
861 pcm->info_flags = 0;
862 strcpy(pcm->name, card_name);
863
864 err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
865 snd_dma_pci_data(chip->pci),
866 size, size);
867 if (err < 0)
868 return err;
869
870 chip->pcm = pcm;
871 chip->capture_stream.is_capture = 1;
872
873 return 0;
874}
875
876static int lx_control_playback_info(struct snd_kcontrol *kcontrol,
877 struct snd_ctl_elem_info *uinfo)
878{
879 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
880 uinfo->count = 1;
881 uinfo->value.integer.min = 0;
882 uinfo->value.integer.max = 1;
883 return 0;
884}
885
886static int lx_control_playback_get(struct snd_kcontrol *kcontrol,
887 struct snd_ctl_elem_value *ucontrol)
888{
889 struct lx6464es *chip = snd_kcontrol_chip(kcontrol);
890 ucontrol->value.integer.value[0] = chip->playback_mute;
891 return 0;
892}
893
894static int lx_control_playback_put(struct snd_kcontrol *kcontrol,
895 struct snd_ctl_elem_value *ucontrol)
896{
897 struct lx6464es *chip = snd_kcontrol_chip(kcontrol);
898 int changed = 0;
899 int current_value = chip->playback_mute;
900
901 if (current_value != ucontrol->value.integer.value[0]) {
902 lx_level_unmute(chip, 0, !current_value);
903 chip->playback_mute = !current_value;
904 changed = 1;
905 }
906 return changed;
907}
908
/* mixer control template for the global playback mute switch */
static struct snd_kcontrol_new lx_control_playback_switch __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "PCM Playback Switch",
	.index = 0,
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.private_value = 0,
	.info = lx_control_playback_info,
	.get = lx_control_playback_get,
	.put = lx_control_playback_put
};
919
920
921
922static void lx_proc_levels_read(struct snd_info_entry *entry,
923 struct snd_info_buffer *buffer)
924{
925 u32 levels[64];
926 int err;
927 int i, j;
928 struct lx6464es *chip = entry->private_data;
929
930 snd_iprintf(buffer, "capture levels:\n");
931 err = lx_level_peaks(chip, 1, 64, levels);
932 if (err < 0)
933 return;
934
935 for (i = 0; i != 8; ++i) {
936 for (j = 0; j != 8; ++j)
937 snd_iprintf(buffer, "%08x ", levels[i*8+j]);
938 snd_iprintf(buffer, "\n");
939 }
940
941 snd_iprintf(buffer, "\nplayback levels:\n");
942
943 err = lx_level_peaks(chip, 0, 64, levels);
944 if (err < 0)
945 return;
946
947 for (i = 0; i != 8; ++i) {
948 for (j = 0; j != 8; ++j)
949 snd_iprintf(buffer, "%08x ", levels[i*8+j]);
950 snd_iprintf(buffer, "\n");
951 }
952
953 snd_iprintf(buffer, "\n");
954}
955
956static int __devinit lx_proc_create(struct snd_card *card, struct lx6464es *chip)
957{
958 struct snd_info_entry *entry;
959 int err = snd_card_proc_new(card, "levels", &entry);
960 if (err < 0)
961 return err;
962
963 snd_info_set_text_ops(entry, chip, lx_proc_levels_read);
964 return 0;
965}
966
967
/* Allocate and initialize the chip instance: enable the PCI device,
 * map both BARs, install the interrupt handler, bring up the DSP and
 * create the PCM, proc and mixer interfaces.  On success *rchip points
 * to the new chip; on failure resources acquired so far are released
 * (directly via the goto chain before snd_device_new(), afterwards via
 * the caller's snd_card_free() → dev_free path). */
static int __devinit snd_lx6464es_create(struct snd_card *card,
					 struct pci_dev *pci,
					 struct lx6464es **rchip)
{
	struct lx6464es *chip;
	int err;

	static struct snd_device_ops ops = {
		.dev_free = snd_lx6464es_dev_free,
	};

	snd_printdd("->snd_lx6464es_create\n");

	*rchip = NULL;

	/* enable PCI device */
	err = pci_enable_device(pci);
	if (err < 0)
		return err;

	pci_set_master(pci);

	/* check if we can restrict PCI DMA transfers to 32 bits */
	err = pci_set_dma_mask(pci, DMA_32BIT_MASK);
	if (err < 0) {
		snd_printk(KERN_ERR "architecture does not support "
			   "32bit PCI busmaster DMA\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		err = -ENOMEM;
		goto alloc_failed;
	}

	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;		/* lets free() know no IRQ was requested yet */

	/* initialize synchronization structs */
	spin_lock_init(&chip->lock);
	spin_lock_init(&chip->msg_lock);
	mutex_init(&chip->setup_mutex);
	tasklet_init(&chip->trigger_tasklet, lx_trigger_tasklet,
		     (unsigned long)chip);
	tasklet_init(&chip->tasklet_capture, lx_tasklet_capture,
		     (unsigned long)chip);
	tasklet_init(&chip->tasklet_playback, lx_tasklet_playback,
		     (unsigned long)chip);

	/* request resources */
	err = pci_request_regions(pci, card_name);
	if (err < 0)
		goto request_regions_failed;

	/* plx port */
	chip->port_plx = pci_resource_start(pci, 1);
	chip->port_plx_remapped = ioport_map(chip->port_plx,
					     pci_resource_len(pci, 1));

	/* dsp port */
	/* NOTE(review): neither ioport_map() nor pci_ioremap_bar() results
	 * are checked for NULL here — confirm whether failure is possible
	 * on the supported platforms */
	chip->port_dsp_bar = pci_ioremap_bar(pci, 2);

	err = request_irq(pci->irq, lx_interrupt, IRQF_SHARED,
			  card_name, chip);
	if (err) {
		snd_printk(KERN_ERR LXP "unable to grab IRQ %d\n", pci->irq);
		goto request_irq_failed;
	}
	chip->irq = pci->irq;

	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
	if (err < 0)
		goto device_new_failed;

	/* from here on, error cleanup is handled by the snd_device
	 * dev_free callback when the caller frees the card */
	err = lx_init_dsp(chip);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "error during DSP initialization\n");
		return err;
	}

	err = lx_pcm_create(chip);
	if (err < 0)
		return err;

	err = lx_proc_create(card, chip);
	if (err < 0)
		return err;

	err = snd_ctl_add(card, snd_ctl_new1(&lx_control_playback_switch,
					     chip));
	if (err < 0)
		return err;

	snd_card_set_dev(card, &pci->dev);

	*rchip = chip;
	return 0;

device_new_failed:
	free_irq(pci->irq, chip);

request_irq_failed:
	pci_release_regions(pci);

request_regions_failed:
	kfree(chip);

alloc_failed:
	pci_disable_device(pci);

	return err;
}
1083
/* PCI probe: create the ALSA card, attach the chip and register the
 * card.  Uses the standard ALSA index/id/enable module parameter
 * scheme indexed by the static 'dev' counter. */
static int __devinit snd_lx6464es_probe(struct pci_dev *pci,
					const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct lx6464es *chip;
	int err;

	snd_printdd("->snd_lx6464es_probe\n");

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	err = snd_lx6464es_create(card, pci, &chip);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "error during snd_lx6464es_create\n");
		goto out_free;
	}

	strcpy(card->driver, "lx6464es");
	strcpy(card->shortname, "Digigram LX6464ES");
	sprintf(card->longname, "%s at 0x%lx, 0x%p, irq %i",
		card->shortname, chip->port_plx,
		chip->port_dsp_bar, chip->irq);

	err = snd_card_register(card);
	if (err < 0)
		goto out_free;

	snd_printdd(LXP "initialization successful\n");
	pci_set_drvdata(pci, card);
	dev++;
	return 0;

out_free:
	/* snd_card_free() also releases the chip via the dev_free hook */
	snd_card_free(card);
	return err;

}
1131
1132static void __devexit snd_lx6464es_remove(struct pci_dev *pci)
1133{
1134 snd_card_free(pci_get_drvdata(pci));
1135 pci_set_drvdata(pci, NULL);
1136}
1137
1138
/* PCI driver glue */
static struct pci_driver driver = {
	.name =     "Digigram LX6464ES",
	.id_table = snd_lx6464es_ids,
	.probe =    snd_lx6464es_probe,
	.remove = __devexit_p(snd_lx6464es_remove),
};
1145
1146
/* module initialization */
static int __init mod_init(void)
{
	return pci_register_driver(&driver);
}

/* module teardown: unregisters the PCI driver, which removes all cards */
static void __exit mod_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(mod_init);
module_exit(mod_exit);
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
new file mode 100644
index 000000000000..012c010c8c89
--- /dev/null
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -0,0 +1,114 @@
1/* -*- linux-c -*- *
2 *
3 * ALSA driver for the digigram lx6464es interface
4 *
5 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
22 *
23 */
24
25#ifndef LX6464ES_H
26#define LX6464ES_H
27
28#include <linux/spinlock.h>
29#include <asm/atomic.h>
30
31#include <sound/core.h>
32#include <sound/pcm.h>
33
34#include "lx_core.h"
35
36#define LXP "LX6464ES: "
37
/* states of the asynchronous command interface to the MicroBlaze */
enum {
	ES_cmd_free         = 0,	/* no command executing */
	ES_cmd_processing   = 1,	/* execution of a read/write command */
	ES_read_pending     = 2,	/* an asynchronous read command is pending */
	ES_read_finishing   = 3,	/* a read command has finished waiting (set by
					 * Interrupt or CancelIrp) */
};
45
/* lifecycle of a PCM stream; the SCHEDULE_* states request a transition
 * that is carried out asynchronously (tasklet/interrupt context) */
enum lx_stream_status {
	LX_STREAM_STATUS_FREE,
/*	LX_STREAM_STATUS_OPEN, */
	LX_STREAM_STATUS_SCHEDULE_RUN,
/*	LX_STREAM_STATUS_STARTED, */
	LX_STREAM_STATUS_RUNNING,
	LX_STREAM_STATUS_SCHEDULE_STOP,
/*	LX_STREAM_STATUS_STOPPED, */
/*	LX_STREAM_STATUS_PAUSED */
};
56
57
58struct lx_stream {
59 struct snd_pcm_substream *stream;
60 snd_pcm_uframes_t frame_pos;
61 enum lx_stream_status status; /* free, open, running, draining
62 * pause */
63 int is_capture:1;
64};
65
66
/* main per-card driver state */
struct lx6464es {
	struct snd_card        *card;		/* owning ALSA card */
	struct pci_dev         *pci;		/* underlying PCI device */
	int			irq;		/* -1 until request_irq() succeeds */

	spinlock_t		lock;		/* interrupt spinlock */
	struct mutex            setup_mutex;	/* mutex used in hw_params, open
						 * and close */

	struct tasklet_struct   trigger_tasklet; /* trigger tasklet */
	struct tasklet_struct   tasklet_capture;
	struct tasklet_struct   tasklet_playback;

	/* ports */
	unsigned long		port_plx;	   /* io port (size=256) */
	void __iomem           *port_plx_remapped; /* remapped plx port */
	void __iomem           *port_dsp_bar;      /* memory port (32-bit,
						    * non-prefetchable,
						    * size=8K) */

	/* messaging */
	spinlock_t		msg_lock;          /* message spinlock */
	atomic_t		send_message_locked; /* set while a command awaits
						      * its interrupt answer */
	struct lx_rmh           rmh;		/* shared command/status buffer,
						 * protected by msg_lock */

	/* configuration */
	uint			freq_ratio : 2;	/* single/dual/quad speed mode */
	uint                    playback_mute : 1; /* cached mixer mute state */
	uint                    hardware_running[2];
	u32                     board_sample_rate; /* sample rate read from
						    * board */
	u32                     sample_rate;	   /* our sample rate */
	u16                     pcm_granularity;   /* board blocksize */

	/* dma */
	struct snd_dma_buffer   capture_dma_buf;
	struct snd_dma_buffer   playback_dma_buf;

	/* pcm */
	struct snd_pcm         *pcm;

	/* streams */
	struct lx_stream        capture_stream;
	struct lx_stream        playback_stream;
};
112
113
114#endif /* LX6464ES_H */
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
new file mode 100644
index 000000000000..5812780d6e89
--- /dev/null
+++ b/sound/pci/lx6464es/lx_core.c
@@ -0,0 +1,1444 @@
1/* -*- linux-c -*- *
2 *
3 * ALSA driver for the digigram lx6464es interface
4 * low-level interface
5 *
6 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
22 *
23 */
24
25/* #define RMH_DEBUG 1 */
26
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/delay.h>
30
31#include "lx6464es.h"
32#include "lx_core.h"
33
34/* low-level register access */
35
/* offsets of the DSP registers inside the DSP BAR, in 32-bit words
 * (multiplied by 4 in lx_dsp_register()); indexed by the eReg_* enum */
static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,

	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,

	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};
68
69static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
70{
71 void __iomem *base_address = chip->port_dsp_bar;
72 return base_address + dsp_port_offsets[port]*4;
73}
74
/* Read one 32-bit DSP register. */
unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
	return ioread32(lx_dsp_register(chip, port));
}
80
81void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
82{
83 void __iomem *address = lx_dsp_register(chip, port);
84 memcpy_fromio(data, address, len*sizeof(u32));
85}
86
87
/* Write one 32-bit DSP register. */
void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
	iowrite32(data, lx_dsp_register(chip, port));
}
93
94void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
95 u32 len)
96{
97 void __iomem *address = lx_dsp_register(chip, port);
98 memcpy_toio(address, data, len*sizeof(u32));
99}
100
101
/* byte offsets of the PLX registers inside the remapped io port region;
 * indexed by the ePLX_* enum */
static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};
116
117static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
118{
119 void __iomem *base_address = chip->port_plx_remapped;
120 return base_address + plx_port_offsets[port];
121}
122
/* Read one 32-bit PLX register. */
unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
	return ioread32(lx_plx_register(chip, port));
}
128
129void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
130{
131 void __iomem *address = lx_plx_register(chip, port);
132 iowrite32(data, address);
133}
134
/* Read a PLX mailbox register.  Mailbox 0 is reserved for HF flags and
 * may not be read through this helper; invalid numbers return the
 * 0xdeadbeef marker value. */
u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
	int index;

	switch (mbox_nr) {
	case 1:
		index = ePLX_MBOX1;    break;
	case 2:
		index = ePLX_MBOX2;    break;
	case 3:
		index = ePLX_MBOX3;    break;
	case 4:
		index = ePLX_MBOX4;    break;
	case 5:
		index = ePLX_MBOX5;    break;
	case 6:
		index = ePLX_MBOX6;    break;
	case 7:
		index = ePLX_MBOX7;    break;
	case 0:			/* reserved for HF flags */
		snd_BUG();
		/* fall through to the error return */
	default:
		return 0xdeadbeef;
	}

	return lx_plx_reg_read(chip, index);
}
162
163int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
164{
165 int index = -1;
166
167 switch (mbox_nr) {
168 case 1:
169 index = ePLX_MBOX1; break;
170 case 3:
171 index = ePLX_MBOX3; break;
172 case 4:
173 index = ePLX_MBOX4; break;
174 case 5:
175 index = ePLX_MBOX5; break;
176 case 6:
177 index = ePLX_MBOX6; break;
178 case 7:
179 index = ePLX_MBOX7; break;
180 case 0: /* reserved for HF flags */
181 case 2: /* reserved for Pipe States
182 * the DSP keeps an image of it */
183 snd_BUG();
184 return -EBADRQC;
185 }
186
187 lx_plx_reg_write(chip, index, value);
188 return 0;
189}
190
191
192/* rmh */
193
194#ifdef CONFIG_SND_DEBUG
195#define CMD_NAME(a) a
196#else
197#define CMD_NAME(a) NULL
198#endif
199
200#define Reg_CSM_MR 0x00000002
201#define Reg_CSM_MC 0x00000001
202
/* static description of one MicroBlaze command (see dsp_commands[]) */
struct dsp_cmd_info {
	u32    dcCodeOp;	/* Op Code of the command (usually 1st 24-bits
				 * word).*/
	u16    dcCmdLength;	/* Command length in words of 24 bits.*/
	u16    dcStatusType;	/* Status type: 0 for fixed length, 1 for
				 * random. */
	u16    dcStatusLength;	/* Status length (if fixed).*/
	char  *dcOpName;	/* printable name, NULL unless CONFIG_SND_DEBUG */
};
212
213/*
214 Initialization and control data for the Microblaze interface
215 - OpCode:
216 the opcode field of the command set at the proper offset
217 - CmdLength
218 the number of command words
219 - StatusType
220 offset in the status registers: 0 means that the return value may be
221 different from 0, and must be read
222 - StatusLength
223 the number of status words (in addition to the return value)
224*/
225
/* command table indexed by enum cmd_mb_opcodes; each entry gives the
 * opcode word, command length, status type and status length consumed
 * by lx_message_init() */
static struct dsp_cmd_info dsp_commands[] =
{
	{ (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
	  , 1 , 0 /**/		    , CMD_NAME("INFO_DEBUG") },
	{ (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /**/		    , CMD_NAME("GET_SYS_CFG") },
	{ (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		    , CMD_NAME("SET_GRANULARITY") },
	{ (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		    , CMD_NAME("SET_TIMER_IRQ") },
	{ (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /*up to 10*/	    , CMD_NAME("GET_EVENT") },
	{ (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
	  , 1 , 2 /*up to 4*/	    , CMD_NAME("GET_PIPES") },
	{ (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/		    , CMD_NAME("ALLOCATE_PIPE") },
	{ (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /**/		    , CMD_NAME("RELEASE_PIPE") },
	{ (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
	  , 1 , MAX_STREAM_BUFFER   , CMD_NAME("ASK_BUFFERS") },
	{ (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
	  , 0 , 0 /*up to 2*/	    , CMD_NAME("STOP_PIPE") },
	{ (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 2*/	    , CMD_NAME("GET_PIPE_SPL_COUNT") },
	{ (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
	  , 1 , 0 /**/		    , CMD_NAME("TOGGLE_PIPE_STATE") },
	{ (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
	  , 1 , 0 /**/		    , CMD_NAME("DEF_STREAM") },
	{ (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
	  , 1 , 0 /**/		    , CMD_NAME("SET_MUTE") },
	{ (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/
	  , 1 , 2 /**/		    , CMD_NAME("GET_STREAM_SPL_COUNT") },
	{ (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
	  , 0 , 1 /**/		    , CMD_NAME("UPDATE_BUFFER") },
	{ (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 4 /**/		    , CMD_NAME("GET_BUFFER") },
	{ (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /*up to 4*/	    , CMD_NAME("CANCEL_BUFFER") },
	{ (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
	  , 1 , 1 /**/		    , CMD_NAME("GET_PEAK") },
	{ (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
	  , 1 , 0 /**/		    , CMD_NAME("SET_STREAM_STATE") },
};
269
/* Prepare the rmh buffer for the given command: copy the opcode and the
 * command/status lengths from dsp_commands[] and clear the remaining
 * command words.  Caller must hold chip->msg_lock. */
static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
	snd_BUG_ON(cmd >= CMD_14_INVALID);

	rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
	rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
	rmh->stat_len = dsp_commands[cmd].dcStatusLength;
	rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
	rmh->cmd_idx = cmd;
	memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
	/* clear the status words too, so stale data never shows in dumps */
	memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
	rmh->cmd_idx = cmd;
#endif
}
288
#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
/* Dump the command and status words of an rmh exchange to the log
 * (debug builds with RMH_DEBUG only). */
static void lx_message_dump(struct lx_rmh *rmh)
{
	u8 idx = rmh->cmd_idx;
	int i;

	snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

	for (i = 0; i != rmh->cmd_len; ++i)
		snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

	for (i = 0; i != rmh->stat_len; ++i)
		snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
	snd_printk("\n");
}
#else
/* no-op when RMH_DEBUG is not defined */
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
309
310
311
312/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
313#define XILINX_TIMEOUT_MS 40
314#define XILINX_POLL_NO_SLEEP 100
315#define XILINX_POLL_ITERATIONS 150
316
#if 0 /* not used now */
/* Interrupt-driven variant of the command exchange: writes the command,
 * then sleeps waiting for the answer interrupt to clear
 * send_message_locked, falling back to polling Reg_CSM_MR on timeout.
 * Currently compiled out in favour of lx_message_send_atomic(). */
static int lx_message_send(struct lx6464es *chip, struct lx_rmh *rmh)
{
	u32 reg = ED_DSP_TIMED_OUT;
	int dwloop;
	int answer_received;

	if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
		snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
		return -EBUSY;
	}

	/* write command */
	lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

	snd_BUG_ON(atomic_read(&chip->send_message_locked) != 0);
	atomic_set(&chip->send_message_locked, 1);

	/* MicoBlaze gogogo */
	lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

	/* wait for interrupt to answer */
	for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS; ++dwloop) {
		answer_received = atomic_read(&chip->send_message_locked);
		if (answer_received == 0)
			break;
		msleep(1);
	}

	if (answer_received == 0) {
		/* in Debug mode verify Reg_CSM_MR */
		snd_BUG_ON(!(lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR));

		/* command finished, read status */
		if (rmh->dsp_stat == 0)
			reg = lx_dsp_reg_read(chip, eReg_CRM1);
		else
			reg = 0;
	} else {
		int i;
		snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
			   "Interrupts disabled?\n");

		/* wait for the Reg_CSM_MR bit */
		for (i = 0; i != XILINX_POLL_ITERATIONS; i++) {
			if ((lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR)) {
				if (rmh->dsp_stat == 0)
					reg = lx_dsp_reg_read(chip, eReg_CRM1);
				else
					reg = 0;
				goto polling_successful;
			}

			/* busy-poll the first iterations, then back off */
			if (i > XILINX_POLL_NO_SLEEP)
				msleep(1);
		}
		snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
			   "polling failed\n");

polling_successful:
		atomic_set(&chip->send_message_locked, 0);
	}

	if ((reg & ERROR_VALUE) == 0) {
		/* read response */
		if (rmh->stat_len) {
			snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));

			lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
					   rmh->stat_len);
		}
	} else
		snd_printk(KERN_WARNING LXP "lx_message_send: error_value %x\n",
			   reg);

	/* clear Reg_CSM_MR */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	switch (reg) {
	case ED_DSP_TIMED_OUT:
		snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
		return -ETIMEDOUT;

	case ED_DSP_CRASHED:
		snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
		return -EAGAIN;
	}

	lx_message_dump(rmh);
	return 0;
}
#endif /* not used now */
409
/* Send a command to the MicroBlaze and busy-wait (udelay) for the
 * answer, suitable for atomic context.  Caller must hold
 * chip->msg_lock.  Returns 0 on success, -EBUSY if a command is
 * already in flight, -ETIMEDOUT/-EAGAIN on DSP failure, or the raw
 * (positive) DSP status word for other error values. */
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
	u32 reg = ED_DSP_TIMED_OUT;
	int dwloop;

	if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
		snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
		return -EBUSY;
	}

	/* write command */
	lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

	/* MicoBlaze gogogo */
	lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

	/* wait for interrupt to answer (poll every 1us, up to 40ms) */
	for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
		if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
			if (rmh->dsp_stat == 0)
				reg = lx_dsp_reg_read(chip, eReg_CRM1);
			else
				reg = 0;
			goto polling_successful;
		} else
			udelay(1);
	}
	snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! "
		   "polling failed\n");

polling_successful:
	if ((reg & ERROR_VALUE) == 0) {
		/* read response */
		if (rmh->stat_len) {
			snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
			lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
					   rmh->stat_len);
		}
	} else
		snd_printk(LXP "rmh error: %08x\n", reg);

	/* clear Reg_CSM_MR */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	switch (reg) {
	case ED_DSP_TIMED_OUT:
		snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
		return -ETIMEDOUT;

	case ED_DSP_CRASHED:
		snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
		return -EAGAIN;
	}

	lx_message_dump(rmh);

	return reg;
}
468
469
470/* low-level dsp access */
471int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
472{
473 u16 ret;
474 unsigned long flags;
475
476 spin_lock_irqsave(&chip->msg_lock, flags);
477
478 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
479 ret = lx_message_send_atomic(chip, &chip->rmh);
480
481 *rdsp_version = chip->rmh.stat[1];
482 spin_unlock_irqrestore(&chip->msg_lock, flags);
483 return ret;
484}
485
486int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
487{
488 u16 ret = 0;
489 unsigned long flags;
490 u32 freq_raw = 0;
491 u32 freq = 0;
492 u32 frequency = 0;
493
494 spin_lock_irqsave(&chip->msg_lock, flags);
495
496 lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
497 ret = lx_message_send_atomic(chip, &chip->rmh);
498
499 if (ret == 0) {
500 freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
501 freq = freq_raw & XES_FREQ_COUNT8_MASK;
502
503 if ((freq < XES_FREQ_COUNT8_48_MAX) ||
504 (freq > XES_FREQ_COUNT8_44_MIN))
505 frequency = 0; /* unknown */
506 else if (freq >= XES_FREQ_COUNT8_44_MAX)
507 frequency = 44100;
508 else
509 frequency = 48000;
510 }
511
512 spin_unlock_irqrestore(&chip->msg_lock, flags);
513
514 *rfreq = frequency * chip->freq_ratio;
515
516 return ret;
517}
518
/* Read the board's EtherSound MAC address from the two 24-bit MAC
 * registers into mac_address[0..5] (most significant byte first).
 * The per-byte extraction relies on host byte order — see todo. */
int lx_dsp_get_mac(struct lx6464es *chip, u8 *mac_address)
{
	u32 macmsb, maclsb;

	macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
	maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

	/* todo: endianess handling */
	mac_address[5] = ((u8 *)(&maclsb))[0];
	mac_address[4] = ((u8 *)(&maclsb))[1];
	mac_address[3] = ((u8 *)(&maclsb))[2];
	mac_address[2] = ((u8 *)(&macmsb))[0];
	mac_address[1] = ((u8 *)(&macmsb))[1];
	mac_address[0] = ((u8 *)(&macmsb))[2];

	return 0;
}
536
537
538int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
539{
540 unsigned long flags;
541 int ret;
542
543 spin_lock_irqsave(&chip->msg_lock, flags);
544
545 lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
546 chip->rmh.cmd[0] |= gran;
547
548 ret = lx_message_send_atomic(chip, &chip->rmh);
549 spin_unlock_irqrestore(&chip->msg_lock, flags);
550 return ret;
551}
552
/* Fetch the pending asynchronous event words from the DSP into `data`
 * (9 u32 words).  Returns 0 on success. */
int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chip->msg_lock, flags);

	lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
	chip->rmh.stat_len = 9;	/* we don't necessarily need the full length */

	ret = lx_message_send_atomic(chip, &chip->rmh);

	if (!ret)
		memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return ret;
}
571
#define CSES_TIMEOUT 100 /* microseconds */
#define CSES_CE 0x0001
#define CSES_BROADCAST 0x0002
#define CSES_UPDATE_LDSV 0x0004

/* Poll the CSES register until the EtherSound pipeline is ready, i.e.
 * until the CE bit clears.  Returns 0 on success, -ETIMEDOUT after
 * CSES_TIMEOUT microseconds. */
int lx_dsp_es_check_pipeline(struct lx6464es *chip)
{
	int i;

	for (i = 0; i != CSES_TIMEOUT; ++i) {
		/*
		 * the CSES_UPDATE_LDSV bit goes to 1 as soon as the
		 * macprog is ready.  It returns to 0 once the first
		 * read has been done.  For now that test is removed,
		 * because the bit only goes to 1 some 200 to 400 ms
		 * after the confES register has been written (kick of
		 * the xilinx ES).
		 *
		 * Only the CE bit is tested.
		 * */

		u32 cses = lx_dsp_reg_read(chip, eReg_CSES);

		if ((cses & CSES_CE) == 0)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}
602
603
/* Build the pipe-addressing bits of a command word: the pipe number,
 * or'ed with ID_IS_CAPTURE for capture pipes, shifted to ID_OFFSET. */
#define PIPE_INFO_TO_CMD(capture, pipe) \
	((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
606
607
608
609/* low-level pipe handling */
610int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
611 int channels)
612{
613 int err;
614 unsigned long flags;
615
616 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
617
618 spin_lock_irqsave(&chip->msg_lock, flags);
619 lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);
620
621 chip->rmh.cmd[0] |= pipe_cmd;
622 chip->rmh.cmd[0] |= channels;
623
624 err = lx_message_send_atomic(chip, &chip->rmh);
625 spin_unlock_irqrestore(&chip->msg_lock, flags);
626
627 if (err != 0)
628 snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n");
629
630 return err;
631}
632
633int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
634{
635 int err;
636 unsigned long flags;
637
638 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
639
640 spin_lock_irqsave(&chip->msg_lock, flags);
641 lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);
642
643 chip->rmh.cmd[0] |= pipe_cmd;
644
645 err = lx_message_send_atomic(chip, &chip->rmh);
646 spin_unlock_irqrestore(&chip->msg_lock, flags);
647
648 return err;
649}
650
651int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
652 u32 *r_needed, u32 *r_freed, u32 *size_array)
653{
654 int err;
655 unsigned long flags;
656
657 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
658
659#ifdef CONFIG_SND_DEBUG
660 if (size_array)
661 memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
662#endif
663
664 *r_needed = 0;
665 *r_freed = 0;
666
667 spin_lock_irqsave(&chip->msg_lock, flags);
668 lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);
669
670 chip->rmh.cmd[0] |= pipe_cmd;
671
672 err = lx_message_send_atomic(chip, &chip->rmh);
673
674 if (!err) {
675 int i;
676 for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
677 u32 stat = chip->rmh.stat[i];
678 if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
679 /* finished */
680 *r_freed += 1;
681 if (size_array)
682 size_array[i] = stat & MASK_DATA_SIZE;
683 } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
684 == 0)
685 /* free */
686 *r_needed += 1;
687 }
688
689#if 0
690 snd_printdd(LXP "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
691 *r_needed, *r_freed);
692 for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
693 for (i = 0; i != chip->rmh.stat_len; ++i)
694 snd_printdd(" stat[%d]: %x, %x\n", i,
695 chip->rmh.stat[i],
696 chip->rmh.stat[i] & MASK_DATA_SIZE);
697 }
698#endif
699 }
700
701 spin_unlock_irqrestore(&chip->msg_lock, flags);
702 return err;
703}
704
705
706int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
707{
708 int err;
709 unsigned long flags;
710
711 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
712
713 spin_lock_irqsave(&chip->msg_lock, flags);
714 lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);
715
716 chip->rmh.cmd[0] |= pipe_cmd;
717
718 err = lx_message_send_atomic(chip, &chip->rmh);
719
720 spin_unlock_irqrestore(&chip->msg_lock, flags);
721 return err;
722}
723
724static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
725{
726 int err;
727 unsigned long flags;
728
729 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
730
731 spin_lock_irqsave(&chip->msg_lock, flags);
732 lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);
733
734 chip->rmh.cmd[0] |= pipe_cmd;
735
736 err = lx_message_send_atomic(chip, &chip->rmh);
737
738 spin_unlock_irqrestore(&chip->msg_lock, flags);
739 return err;
740}
741
742
743int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
744{
745 int err;
746
747 err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
748 if (err < 0)
749 return err;
750
751 err = lx_pipe_toggle_state(chip, pipe, is_capture);
752
753 return err;
754}
755
756int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
757{
758 int err = 0;
759
760 err = lx_pipe_wait_for_start(chip, pipe, is_capture);
761 if (err < 0)
762 return err;
763
764 err = lx_pipe_toggle_state(chip, pipe, is_capture);
765
766 return err;
767}
768
769
/* Read the 64-bit sample counter of a pipe (CMD_0A_GET_PIPE_SPL_COUNT).
 * stat[0] carries the high part (masked with MASK_SPL_COUNT_HI),
 * stat[1] the low part.
 *
 * NOTE(review): the high part is shifted by 24 here, while
 * lx_stream_sample_position() shifts its high word by 32 — presumably
 * the pipe counter's low word is only 24 bits wide; confirm against the
 * firmware documentation.
 */
int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
			 u64 *rsample_count)
{
	int err;
	unsigned long flags;

	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	spin_lock_irqsave(&chip->msg_lock, flags);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.stat_len = 2;	/* need all words here! */

	err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

	if (err != 0)
		snd_printk(KERN_ERR
			   "lx6464es: could not query pipe's sample count\n");
	else {
		*rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
				  << 24)     /* hi part */
			+ chip->rmh.stat[1]; /* lo part */
	}

	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return err;
}
798
/* Query the state of a pipe; *rstate receives the 4-bit PSTATE_* value
 * from the first status word.
 *
 * NOTE(review): this issues CMD_0A_GET_PIPE_SPL_COUNT rather than a
 * dedicated "get state" command — presumably the state is piggybacked
 * on the sample-count reply; confirm against the firmware docs.
 */
int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
	int err;
	unsigned long flags;

	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	spin_lock_irqsave(&chip->msg_lock, flags);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err != 0)
		snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n");
	else
		*rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return err;
}
821
822static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
823 int is_capture, u16 state)
824{
825 int i;
826
827 /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
828 * timeout 50 ms */
829 for (i = 0; i != 50; ++i) {
830 u16 current_state;
831 int err = lx_pipe_state(chip, pipe, is_capture, &current_state);
832
833 if (err < 0)
834 return err;
835
836 if (current_state == state)
837 return 0;
838
839 mdelay(1);
840 }
841
842 return -ETIMEDOUT;
843}
844
/* Wait (up to ~50 ms) until the pipe reports the RUN state. */
int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}
849
/* Wait (up to ~50 ms) until the pipe reports the IDLE state. */
int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}
854
855/* low-level stream handling */
856int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
857 int is_capture, enum stream_state_t state)
858{
859 int err;
860 unsigned long flags;
861
862 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
863
864 spin_lock_irqsave(&chip->msg_lock, flags);
865 lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);
866
867 chip->rmh.cmd[0] |= pipe_cmd;
868 chip->rmh.cmd[0] |= state;
869
870 err = lx_message_send_atomic(chip, &chip->rmh);
871 spin_unlock_irqrestore(&chip->msg_lock, flags);
872
873 return err;
874}
875
876int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
877 u32 pipe, int is_capture)
878{
879 int err;
880 unsigned long flags;
881
882 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
883
884 u32 channels = runtime->channels;
885
886 if (runtime->channels != channels)
887 snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d",
888 runtime->channels, channels);
889
890 spin_lock_irqsave(&chip->msg_lock, flags);
891 lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);
892
893 chip->rmh.cmd[0] |= pipe_cmd;
894
895 if (runtime->sample_bits == 16)
896 /* 16 bit format */
897 chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);
898
899 if (snd_pcm_format_little_endian(runtime->format))
900 /* little endian/intel format */
901 chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);
902
903 chip->rmh.cmd[0] |= channels-1;
904
905 err = lx_message_send_atomic(chip, &chip->rmh);
906 spin_unlock_irqrestore(&chip->msg_lock, flags);
907
908 return err;
909}
910
911int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
912 int *rstate)
913{
914 int err;
915 unsigned long flags;
916
917 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
918
919 spin_lock_irqsave(&chip->msg_lock, flags);
920 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
921
922 chip->rmh.cmd[0] |= pipe_cmd;
923
924 err = lx_message_send_atomic(chip, &chip->rmh);
925
926 *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;
927
928 spin_unlock_irqrestore(&chip->msg_lock, flags);
929 return err;
930}
931
932int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
933 u64 *r_bytepos)
934{
935 int err;
936 unsigned long flags;
937
938 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
939
940 spin_lock_irqsave(&chip->msg_lock, flags);
941 lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
942
943 chip->rmh.cmd[0] |= pipe_cmd;
944
945 err = lx_message_send_atomic(chip, &chip->rmh);
946
947 *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
948 << 32) /* hi part */
949 + chip->rmh.stat[1]; /* lo part */
950
951 spin_unlock_irqrestore(&chip->msg_lock, flags);
952 return err;
953}
954
955/* low-level buffer handling */
956int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
957 u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
958 u32 *r_buffer_index)
959{
960 int err;
961 unsigned long flags;
962
963 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
964
965 spin_lock_irqsave(&chip->msg_lock, flags);
966 lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);
967
968 chip->rmh.cmd[0] |= pipe_cmd;
969 chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */
970
971 /* todo: pause request, circular buffer */
972
973 chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
974 chip->rmh.cmd[2] = buf_address_lo;
975
976 if (buf_address_hi) {
977 chip->rmh.cmd_len = 4;
978 chip->rmh.cmd[3] = buf_address_hi;
979 chip->rmh.cmd[0] |= BF_64BITS_ADR;
980 }
981
982 err = lx_message_send_atomic(chip, &chip->rmh);
983
984 if (err == 0) {
985 *r_buffer_index = chip->rmh.stat[0];
986 goto done;
987 }
988
989 if (err == EB_RBUFFERS_TABLE_OVERFLOW)
990 snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");
991
992 if (err == EB_INVALID_STREAM)
993 snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n");
994
995 if (err == EB_CMD_REFUSED)
996 snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n");
997
998 done:
999 spin_unlock_irqrestore(&chip->msg_lock, flags);
1000 return err;
1001}
1002
1003int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
1004 u32 *r_buffer_size)
1005{
1006 int err;
1007 unsigned long flags;
1008
1009 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
1010
1011 spin_lock_irqsave(&chip->msg_lock, flags);
1012 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
1013
1014 chip->rmh.cmd[0] |= pipe_cmd;
1015 chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
1016 * microblaze will seek for it */
1017
1018 err = lx_message_send_atomic(chip, &chip->rmh);
1019
1020 if (err == 0)
1021 *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;
1022
1023 spin_unlock_irqrestore(&chip->msg_lock, flags);
1024 return err;
1025}
1026
1027int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
1028 u32 buffer_index)
1029{
1030 int err;
1031 unsigned long flags;
1032
1033 u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
1034
1035 spin_lock_irqsave(&chip->msg_lock, flags);
1036 lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
1037
1038 chip->rmh.cmd[0] |= pipe_cmd;
1039 chip->rmh.cmd[0] |= buffer_index;
1040
1041 err = lx_message_send_atomic(chip, &chip->rmh);
1042
1043 spin_unlock_irqrestore(&chip->msg_lock, flags);
1044 return err;
1045}
1046
1047
1048/* low-level gain/peak handling
1049 *
1050 * \todo: can we unmute capture/playback channels independently?
1051 *
1052 * */
1053int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
1054{
1055 int err;
1056 unsigned long flags;
1057
1058 /* bit set to 1: channel muted */
1059 u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;
1060
1061 spin_lock_irqsave(&chip->msg_lock, flags);
1062 lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);
1063
1064 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);
1065
1066 chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */
1067 chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */
1068
1069 snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
1070 chip->rmh.cmd[2]);
1071
1072 err = lx_message_send_atomic(chip, &chip->rmh);
1073
1074 spin_unlock_irqrestore(&chip->msg_lock, flags);
1075 return err;
1076}
1077
/* Lookup table translating the 4-bit peak-meter codes returned by
 * CMD_12_GET_PEAK into 24-bit linear levels (0x007FFFFF = full scale). */
static u32 peak_map[] = {
	0x00000109, /* -90.308dB */
	0x0000083B, /* -72.247dB */
	0x000020C4, /* -60.205dB */
	0x00008273, /* -48.030dB */
	0x00020756, /* -36.005dB */
	0x00040C37, /* -30.001dB */
	0x00081385, /* -24.002dB */
	0x00101D3F, /* -18.000dB */
	0x0016C310, /* -15.000dB */
	0x002026F2, /* -12.001dB */
	0x002D6A86, /* -9.000dB */
	0x004026E6, /* -6.004dB */
	0x005A9DF6, /* -3.000dB */
	0x0065AC8B, /* -2.000dB */
	0x00721481, /* -1.000dB */
	0x007FFFFF, /* FS */
};
1096
/* Read the peak meters of @channels channels (CMD_12_GET_PEAK), four
 * channels per command, translating the 4-bit hardware codes through
 * peak_map[] into r_levels[].  On a command error the four levels of
 * that group are reported as 0.
 *
 * NOTE(review): four levels are stored on every iteration, so r_levels
 * must have room for @channels rounded up to a multiple of 4 — confirm
 * the callers allocate accordingly.
 */
int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
		   u32 *r_levels)
{
	int err = 0;
	unsigned long flags;
	int i;
	spin_lock_irqsave(&chip->msg_lock, flags);

	for (i = 0; i < channels; i += 4) {
		u32 s0, s1, s2, s3;

		lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
		chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

		err = lx_message_send_atomic(chip, &chip->rmh);

		if (err == 0) {
			s0 = peak_map[chip->rmh.stat[0] & 0x0F];
			s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
			s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
			s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
		} else
			s0 = s1 = s2 = s3 = 0;

		r_levels[0] = s0;
		r_levels[1] = s1;
		r_levels[2] = s2;
		r_levels[3] = s3;

		r_levels += 4;
	}

	spin_unlock_irqrestore(&chip->msg_lock, flags);
	return err;
}
1132
/* interrupt handling */
#define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB 0x00002000L /* bit no. 13 */
#define IRQCS_ENABLE_PCIIRQ 0x00000100L /* bit no. 08 */
#define IRQCS_ENABLE_PCIDB 0x00000200L /* bit no. 09 */
1138
1139static u32 lx_interrupt_test_ack(struct lx6464es *chip)
1140{
1141 u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);
1142
1143 /* Test if PCI Doorbell interrupt is active */
1144 if (irqcs & IRQCS_ACTIVE_PCIDB) {
1145 u32 temp;
1146 irqcs = PCX_IRQ_NONE;
1147
1148 while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
1149 /* RAZ interrupt */
1150 irqcs |= temp;
1151 lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
1152 }
1153
1154 return irqcs;
1155 }
1156 return PCX_IRQ_NONE;
1157}
1158
1159static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
1160 int *r_async_pending, int *r_async_escmd)
1161{
1162 u32 irq_async;
1163 u32 irqsrc = lx_interrupt_test_ack(chip);
1164
1165 if (irqsrc == PCX_IRQ_NONE)
1166 return 0;
1167
1168 *r_irqsrc = irqsrc;
1169
1170 irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
1171 * (set by xilinx) + EOB */
1172
1173 if (irq_async & MASK_SYS_STATUS_ESA) {
1174 irq_async &= ~MASK_SYS_STATUS_ESA;
1175 *r_async_escmd = 1;
1176 }
1177
1178 if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
1179 /* xilinx command notification */
1180 atomic_set(&chip->send_message_locked, 0);
1181
1182 if (irq_async) {
1183 /* snd_printd("interrupt: async event pending\n"); */
1184 *r_async_pending = 1;
1185 }
1186
1187 return 1;
1188}
1189
/* Read the asynchronous event words from the DSP and extract the
 * end-of-buffer pipe masks and the frequency-change flag. */
static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
					    int *r_freq_changed,
					    u64 *r_notified_in_pipe_mask,
					    u64 *r_notified_out_pipe_mask)
{
	int err;
	u32 stat[9];		/* answer from CMD_04_GET_EVENT */

	/* We could optimize this to avoid reading empty events.
	 * The reply words come in the following order:
	 * Stat[0]	general status word
	 * Stat[1]	end of OUT buffer, high word
	 * Stat[2]	end of OUT buffer, low word
	 * Stat[3]	end of IN buffer, high word
	 * Stat[4]	end of IN buffer, low word
	 * Stat[5]	underrun, high word
	 * Stat[6]	underrun, low word
	 * Stat[7]	overrun, high word
	 * Stat[8]	overrun, low word
	 * */

	u64 orun_mask;
	u64 urun_mask;
#if 0
	int has_underrun = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0;
	int has_overrun = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0;
#endif
	int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
	int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

	*r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

	err = lx_dsp_read_async_events(chip, stat);
	if (err < 0)
		return err;

	if (eb_pending_in) {
		*r_notified_in_pipe_mask = ((u64)stat[3] << 32)
			+ stat[4];
		snd_printdd(LXP "interrupt: EOBI pending %llx\n",
			    *r_notified_in_pipe_mask);
	}
	if (eb_pending_out) {
		*r_notified_out_pipe_mask = ((u64)stat[1] << 32)
			+ stat[2];
		snd_printdd(LXP "interrupt: EOBO pending %llx\n",
			    *r_notified_out_pipe_mask);
	}

	/* computed but not yet consumed, see todo below */
	orun_mask = ((u64)stat[7] << 32) + stat[8];
	urun_mask = ((u64)stat[5] << 32) + stat[6];

	/* todo: handle xrun notification */

	return err;
}
1246
/* Queue the next period of @lx_stream's substream to the hardware.
 *
 * Computes the DMA address of the current period inside the substream's
 * preallocated DMA buffer, hands it to the DSP with lx_buffer_give(),
 * and advances the stream's period index (wrapping at runtime->periods).
 *
 * NOTE(review): the error from lx_buffer_ask() is overwritten by the
 * lx_buffer_give() result without being checked — presumably the ask is
 * only needed for the debug trace; confirm.
 */
static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
					   struct lx_stream *lx_stream)
{
	struct snd_pcm_substream *substream = lx_stream->stream;
	int is_capture = lx_stream->is_capture;
	int err;
	unsigned long flags;

	/* 24-bit samples: 3 bytes per channel per frame */
	const u32 channels = substream->runtime->channels;
	const u32 bytes_per_frame = channels * 3;
	const u32 period_size = substream->runtime->period_size;
	const u32 period_bytes = period_size * bytes_per_frame;
	const u32 pos = lx_stream->frame_pos;
	const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
		0 : pos + 1;

	dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
	u32 buf_hi = 0;
	u32 buf_lo = 0;
	u32 buffer_index = 0;

	u32 needed, freed;
	u32 size_array[MAX_STREAM_BUFFER];

	snd_printdd("->lx_interrupt_request_new_buffer\n");

	spin_lock_irqsave(&chip->lock, flags);

	err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
	snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed);

	unpack_pointer(buf, &buf_lo, &buf_hi);
	err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
			     &buffer_index);
	snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n",
		    buffer_index, (void *)buf, period_bytes);

	lx_stream->frame_pos = next_pos;
	spin_unlock_irqrestore(&chip->lock, flags);

	return err;
}
1289
1290void lx_tasklet_playback(unsigned long data)
1291{
1292 struct lx6464es *chip = (struct lx6464es *)data;
1293 struct lx_stream *lx_stream = &chip->playback_stream;
1294 int err;
1295
1296 snd_printdd("->lx_tasklet_playback\n");
1297
1298 err = lx_interrupt_request_new_buffer(chip, lx_stream);
1299 if (err < 0)
1300 snd_printk(KERN_ERR LXP
1301 "cannot request new buffer for playback\n");
1302
1303 snd_pcm_period_elapsed(lx_stream->stream);
1304}
1305
1306void lx_tasklet_capture(unsigned long data)
1307{
1308 struct lx6464es *chip = (struct lx6464es *)data;
1309 struct lx_stream *lx_stream = &chip->capture_stream;
1310 int err;
1311
1312 snd_printdd("->lx_tasklet_capture\n");
1313 err = lx_interrupt_request_new_buffer(chip, lx_stream);
1314 if (err < 0)
1315 snd_printk(KERN_ERR LXP
1316 "cannot request new buffer for capture\n");
1317
1318 snd_pcm_period_elapsed(lx_stream->stream);
1319}
1320
1321
1322
1323static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
1324 u64 notified_in_pipe_mask,
1325 u64 notified_out_pipe_mask)
1326{
1327 int err = 0;
1328
1329 if (notified_in_pipe_mask) {
1330 snd_printdd(LXP "requesting audio transfer for capture\n");
1331 tasklet_hi_schedule(&chip->tasklet_capture);
1332 }
1333
1334 if (notified_out_pipe_mask) {
1335 snd_printdd(LXP "requesting audio transfer for playback\n");
1336 tasklet_hi_schedule(&chip->tasklet_playback);
1337 }
1338
1339 return err;
1340}
1341
1342
/* Top-half interrupt handler.
 *
 * Acknowledges the PLX doorbell, then:
 *  - returns IRQ_NONE when this device did not raise the interrupt;
 *  - for command-completion interrupts nothing more is needed (the
 *    completion flag was already cleared in lx_interrupt_ack());
 *  - for async events, reads them and schedules the capture/playback
 *    tasklets for end-of-buffer handling.
 */
irqreturn_t lx_interrupt(int irq, void *dev_id)
{
	struct lx6464es *chip = dev_id;
	int async_pending, async_escmd;
	u32 irqsrc;

	spin_lock(&chip->lock);

	snd_printdd("**************************************************\n");

	if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
		spin_unlock(&chip->lock);
		snd_printdd("IRQ_NONE\n");
		return IRQ_NONE; /* this device did not cause the interrupt */
	}

	/* command completion: nothing left to do here */
	if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
		goto exit;

#if 0
	if (irqsrc & MASK_SYS_STATUS_EOBI)
		snd_printdd(LXP "interrupt: EOBI\n");

	if (irqsrc & MASK_SYS_STATUS_EOBO)
		snd_printdd(LXP "interrupt: EOBO\n");

	if (irqsrc & MASK_SYS_STATUS_URUN)
		snd_printdd(LXP "interrupt: URUN\n");

	if (irqsrc & MASK_SYS_STATUS_ORUN)
		snd_printdd(LXP "interrupt: ORUN\n");
#endif

	if (async_pending) {
		u64 notified_in_pipe_mask = 0;
		u64 notified_out_pipe_mask = 0;
		int freq_changed;
		int err;

		/* handle async events */
		err = lx_interrupt_handle_async_events(chip, irqsrc,
						       &freq_changed,
						       &notified_in_pipe_mask,
						       &notified_out_pipe_mask);
		if (err)
			snd_printk(KERN_ERR LXP
				   "error handling async events\n");

		err = lx_interrupt_handle_audio_transfer(chip,
							 notified_in_pipe_mask,
							 notified_out_pipe_mask
							 );
		if (err)
			snd_printk(KERN_ERR LXP
				   "error during audio transfer\n");
	}

	if (async_escmd) {
#if 0
		/* backdoor for ethersound commands
		 *
		 * for now, we do not need this
		 *
		 * */

		snd_printdd("lx6464es: interrupt requests escmd handling\n");
#endif
	}

exit:
	spin_unlock(&chip->lock);
	return IRQ_HANDLED; /* this device caused the interrupt */
}
1416
1417
1418static void lx_irq_set(struct lx6464es *chip, int enable)
1419{
1420 u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);
1421
1422 /* enable/disable interrupts
1423 *
1424 * Set the Doorbell and PCI interrupt enable bits
1425 *
1426 * */
1427 if (enable)
1428 reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
1429 else
1430 reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
1431 lx_plx_reg_write(chip, ePLX_IRQCS, reg);
1432}
1433
/* Enable interrupt generation on the PLX bridge. */
void lx_irq_enable(struct lx6464es *chip)
{
	snd_printdd("->lx_irq_enable\n");
	lx_irq_set(chip, 1);
}
1439
/* Disable interrupt generation on the PLX bridge. */
void lx_irq_disable(struct lx6464es *chip)
{
	snd_printdd("->lx_irq_disable\n");
	lx_irq_set(chip, 0);
}
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
new file mode 100644
index 000000000000..6bd9cbbbc68d
--- /dev/null
+++ b/sound/pci/lx6464es/lx_core.h
@@ -0,0 +1,242 @@
1/* -*- linux-c -*- *
2 *
3 * ALSA driver for the digigram lx6464es interface
4 * low-level interface
5 *
6 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
22 *
23 */
24
25#ifndef LX_CORE_H
26#define LX_CORE_H
27
28#include <linux/interrupt.h>
29
30#include "lx_defs.h"
31
32#define REG_CRM_NUMBER 12
33
34struct lx6464es;
35
36/* low-level register access */
37
38/* dsp register access */
39enum {
40 eReg_BASE,
41 eReg_CSM,
42 eReg_CRM1,
43 eReg_CRM2,
44 eReg_CRM3,
45 eReg_CRM4,
46 eReg_CRM5,
47 eReg_CRM6,
48 eReg_CRM7,
49 eReg_CRM8,
50 eReg_CRM9,
51 eReg_CRM10,
52 eReg_CRM11,
53 eReg_CRM12,
54
55 eReg_ICR,
56 eReg_CVR,
57 eReg_ISR,
58 eReg_RXHTXH,
59 eReg_RXMTXM,
60 eReg_RHLTXL,
61 eReg_RESETDSP,
62
63 eReg_CSUF,
64 eReg_CSES,
65 eReg_CRESMSB,
66 eReg_CRESLSB,
67 eReg_ADMACESMSB,
68 eReg_ADMACESLSB,
69 eReg_CONFES,
70
71 eMaxPortLx
72};
73
74unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
75void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
76void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
77void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
78 u32 len);
79
80/* plx register access */
81enum {
82 ePLX_PCICR,
83
84 ePLX_MBOX0,
85 ePLX_MBOX1,
86 ePLX_MBOX2,
87 ePLX_MBOX3,
88 ePLX_MBOX4,
89 ePLX_MBOX5,
90 ePLX_MBOX6,
91 ePLX_MBOX7,
92
93 ePLX_L2PCIDB,
94 ePLX_IRQCS,
95 ePLX_CHIPSC,
96
97 eMaxPort
98};
99
100unsigned long lx_plx_reg_read(struct lx6464es *chip, int port);
101void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data);
102
103/* rhm */
104struct lx_rmh {
105 u16 cmd_len; /* length of the command to send (WORDs) */
106 u16 stat_len; /* length of the status received (WORDs) */
107 u16 dsp_stat; /* status type, RMP_SSIZE_XXX */
108 u16 cmd_idx; /* index of the command */
109 u32 cmd[REG_CRM_NUMBER];
110 u32 stat[REG_CRM_NUMBER];
111};
112
113
114/* low-level dsp access */
115int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version);
116int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq);
117int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran);
118int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data);
119int lx_dsp_get_mac(struct lx6464es *chip, u8 *mac_address);
120
121
122/* low-level pipe handling */
123int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
124 int channels);
125int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture);
126int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
127 u64 *rsample_count);
128int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate);
129int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture);
130int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture);
131int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture);
132
133int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture);
134int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture);
135
136/* low-level stream handling */
137int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
138 u32 pipe, int is_capture);
139int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
140 int *rstate);
141int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
142 u64 *r_bytepos);
143
144int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
145 int is_capture, enum stream_state_t state);
146
/* Put a stream into the RUN state. */
static inline int lx_stream_start(struct lx6464es *chip, u32 pipe,
				  int is_capture)
{
	snd_printdd("->lx_stream_start\n");
	return lx_stream_set_state(chip, pipe, is_capture, SSTATE_RUN);
}
153
/* Put a stream into the PAUSE state. */
static inline int lx_stream_pause(struct lx6464es *chip, u32 pipe,
				  int is_capture)
{
	snd_printdd("->lx_stream_pause\n");
	return lx_stream_set_state(chip, pipe, is_capture, SSTATE_PAUSE);
}
160
/* Put a stream into the STOP state. */
static inline int lx_stream_stop(struct lx6464es *chip, u32 pipe,
				 int is_capture)
{
	snd_printdd("->lx_stream_stop\n");
	return lx_stream_set_state(chip, pipe, is_capture, SSTATE_STOP);
}
167
168/* low-level buffer handling */
169int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
170 u32 *r_needed, u32 *r_freed, u32 *size_array);
171int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
172 u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
173 u32 *r_buffer_index);
174int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
175 u32 *r_buffer_size);
176int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
177 u32 buffer_index);
178
179/* low-level gain/peak handling */
180int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute);
181int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
182 u32 *r_levels);
183
184
185/* interrupt handling */
186irqreturn_t lx_interrupt(int irq, void *dev_id);
187void lx_irq_enable(struct lx6464es *chip);
188void lx_irq_disable(struct lx6464es *chip);
189
190void lx_tasklet_capture(unsigned long data);
191void lx_tasklet_playback(unsigned long data);
192
193
194/* Stream Format Header Defines (for LIN and IEEE754) */
195#define HEADER_FMT_BASE HEADER_FMT_BASE_LIN
196#define HEADER_FMT_BASE_LIN 0xFED00000
197#define HEADER_FMT_BASE_FLOAT 0xFAD00000
198#define HEADER_FMT_MONO 0x00000080 /* bit 23 in header_lo. WARNING: old
199 * bit 22 is ignored in float
200 * format */
201#define HEADER_FMT_INTEL 0x00008000
202#define HEADER_FMT_16BITS 0x00002000
203#define HEADER_FMT_24BITS 0x00004000
204#define HEADER_FMT_UPTO11 0x00000200 /* frequency is less or equ. to 11k.
205 * */
206#define HEADER_FMT_UPTO32 0x00000100 /* frequency is over 11k and less
207 * then 32k.*/
208
209
210#define BIT_FMP_HEADER 23
211#define BIT_FMP_SD 22
212#define BIT_FMP_MULTICHANNEL 19
213
214#define START_STATE 1
215#define PAUSE_STATE 0
216
217
218
219
220
221/* from PcxAll_e.h */
222/* Start/Pause condition for pipes (PCXStartPipe, PCXPausePipe) */
223#define START_PAUSE_IMMEDIATE 0
224#define START_PAUSE_ON_SYNCHRO 1
225#define START_PAUSE_ON_TIME_CODE 2
226
227
/* Pipe / Stream state
 * NOTE(review): duplicates the identical START_STATE/PAUSE_STATE
 * definitions earlier in this header; harmless but redundant. */
#define START_STATE 1
#define PAUSE_STATE 0
231
/* Split a DMA address into its low and high 32-bit halves; the high
 * half is always zero on 32-bit configurations. */
static inline void unpack_pointer(dma_addr_t ptr, u32 *r_low, u32 *r_high)
{
	*r_low = (u32)(ptr & 0xffffffff);
#if BITS_PER_LONG == 32
	*r_high = 0;
#else
	*r_high = (u32)((u64)ptr>>32);
#endif
}
241
242#endif /* LX_CORE_H */
diff --git a/sound/pci/lx6464es/lx_defs.h b/sound/pci/lx6464es/lx_defs.h
new file mode 100644
index 000000000000..49d36bdd512c
--- /dev/null
+++ b/sound/pci/lx6464es/lx_defs.h
@@ -0,0 +1,376 @@
1/* -*- linux-c -*- *
2 *
3 * ALSA driver for the digigram lx6464es interface
4 * adapted upstream headers
5 *
6 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
22 *
23 */
24
25#ifndef LX_DEFS_H
26#define LX_DEFS_H
27
28/* code adapted from ethersound.h */
29#define XES_FREQ_COUNT8_MASK 0x00001FFF /* compteur 25MHz entre 8 ech. */
30#define XES_FREQ_COUNT8_44_MIN 0x00001288 /* 25M /
31 * [ 44k - ( 44.1k + 48k ) / 2 ]
32 * * 8 */
33#define XES_FREQ_COUNT8_44_MAX 0x000010F0 /* 25M / [ ( 44.1k + 48k ) / 2 ]
34 * * 8 */
35#define XES_FREQ_COUNT8_48_MAX 0x00000F08 /* 25M /
36 * [ 48k + ( 44.1k + 48k ) / 2 ]
37 * * 8 */
38
39/* code adapted from LXES_registers.h */
40
41#define IOCR_OUTPUTS_OFFSET 0 /* (rw) offset for the number of OUTs in the
42 * ConfES register. */
43#define IOCR_INPUTS_OFFSET 8 /* (rw) offset for the number of INs in the
44 * ConfES register. */
45#define FREQ_RATIO_OFFSET 19 /* (rw) offset for frequency ratio in the
46 * ConfES register. */
47#define FREQ_RATIO_SINGLE_MODE 0x01 /* value for single mode frequency ratio:
48 * sample rate = frequency rate. */
49
50#define CONFES_READ_PART_MASK 0x00070000
51#define CONFES_WRITE_PART_MASK 0x00F80000
52
53/* code adapted from if_drv_mb.h */
54
55#define MASK_SYS_STATUS_ERROR (1L << 31) /* events that lead to a PCI irq if
56 * not yet pending */
57#define MASK_SYS_STATUS_URUN (1L << 30)
58#define MASK_SYS_STATUS_ORUN (1L << 29)
59#define MASK_SYS_STATUS_EOBO (1L << 28)
60#define MASK_SYS_STATUS_EOBI (1L << 27)
61#define MASK_SYS_STATUS_FREQ (1L << 26)
62#define MASK_SYS_STATUS_ESA (1L << 25) /* reserved, this is set by the
63 * XES */
64#define MASK_SYS_STATUS_TIMER (1L << 24)
65
66#define MASK_SYS_ASYNC_EVENTS (MASK_SYS_STATUS_ERROR | \
67 MASK_SYS_STATUS_URUN | \
68 MASK_SYS_STATUS_ORUN | \
69 MASK_SYS_STATUS_EOBO | \
70 MASK_SYS_STATUS_EOBI | \
71 MASK_SYS_STATUS_FREQ | \
72 MASK_SYS_STATUS_ESA)
73
74#define MASK_SYS_PCI_EVENTS (MASK_SYS_ASYNC_EVENTS | \
75 MASK_SYS_STATUS_TIMER)
76
77#define MASK_SYS_TIMER_COUNT 0x0000FFFF
78
79#define MASK_SYS_STATUS_EOT_PLX (1L << 22) /* event that remains
80 * internal: reserved fo end
81 * of plx dma */
82#define MASK_SYS_STATUS_XES (1L << 21) /* event that remains
83 * internal: pending XES
84 * IRQ */
85#define MASK_SYS_STATUS_CMD_DONE (1L << 20) /* alternate command
86 * management: notify driver
87 * instead of polling */
88
89
90#define MAX_STREAM_BUFFER 5 /* max amount of stream buffers. */
91
92#define MICROBLAZE_IBL_MIN 32
93#define MICROBLAZE_IBL_DEFAULT 128
94#define MICROBLAZE_IBL_MAX 512
95/* #define MASK_GRANULARITY (2*MICROBLAZE_IBL_MAX-1) */
96
97
98
99/* command opcodes, see reference for details */
100
101/*
102 the capture bit position in the object_id field in driver commands
103 depends upon the number of managed channels. For now, 64 IN + 64 OUT are
104 supported. HOwever, the communication protocol forsees 1024 channels, hence
105 bit 10 indicates a capture (input) object).
106*/
107#define ID_IS_CAPTURE (1L << 10)
108#define ID_OFFSET 13 /* object ID is at the 13th bit in the
109 * 1st command word.*/
110#define ID_CH_MASK 0x3F
111#define OPCODE_OFFSET 24 /* offset of the command opcode in the first
112 * command word.*/
113
114enum cmd_mb_opcodes {
115 CMD_00_INFO_DEBUG = 0x00,
116 CMD_01_GET_SYS_CFG = 0x01,
117 CMD_02_SET_GRANULARITY = 0x02,
118 CMD_03_SET_TIMER_IRQ = 0x03,
119 CMD_04_GET_EVENT = 0x04,
120 CMD_05_GET_PIPES = 0x05,
121
122 CMD_06_ALLOCATE_PIPE = 0x06,
123 CMD_07_RELEASE_PIPE = 0x07,
124 CMD_08_ASK_BUFFERS = 0x08,
125 CMD_09_STOP_PIPE = 0x09,
126 CMD_0A_GET_PIPE_SPL_COUNT = 0x0a,
127 CMD_0B_TOGGLE_PIPE_STATE = 0x0b,
128
129 CMD_0C_DEF_STREAM = 0x0c,
130 CMD_0D_SET_MUTE = 0x0d,
131 CMD_0E_GET_STREAM_SPL_COUNT = 0x0e,
132 CMD_0F_UPDATE_BUFFER = 0x0f,
133 CMD_10_GET_BUFFER = 0x10,
134 CMD_11_CANCEL_BUFFER = 0x11,
135 CMD_12_GET_PEAK = 0x12,
136 CMD_13_SET_STREAM_STATE = 0x13,
137 CMD_14_INVALID = 0x14,
138};
139
140/* pipe states */
141enum pipe_state_t {
142 PSTATE_IDLE = 0, /* the pipe is not processed in the XES_IRQ
143 * (free or stopped, or paused). */
144 PSTATE_RUN = 1, /* sustained play/record state. */
145 PSTATE_PURGE = 2, /* the ES channels are now off, render pipes do
146 * not DMA, record pipe do a last DMA. */
147 PSTATE_ACQUIRE = 3, /* the ES channels are now on, render pipes do
148 * not yet increase their sample count, record
149 * pipes do not DMA. */
150 PSTATE_CLOSING = 4, /* the pipe is releasing, and may not yet
151 * receive an "alloc" command. */
152};
153
154/* stream states */
155enum stream_state_t {
156 SSTATE_STOP = 0x00, /* setting to stop resets the stream spl
157 * count.*/
158 SSTATE_RUN = (0x01 << 0), /* start DMA and spl count handling. */
159 SSTATE_PAUSE = (0x01 << 1), /* pause DMA and spl count handling. */
160};
161
162/* buffer flags */
163enum buffer_flags {
164 BF_VALID = 0x80, /* set if the buffer is valid, clear if free.*/
165 BF_CURRENT = 0x40, /* set if this is the current buffer (there is
166 * always a current buffer).*/
167 BF_NOTIFY_EOB = 0x20, /* set if this buffer must cause a PCI event
168 * when finished.*/
169 BF_CIRCULAR = 0x10, /* set if buffer[1] must be copied to buffer[0]
170 * by the end of this buffer.*/
171 BF_64BITS_ADR = 0x08, /* set if the hi part of the address is valid.*/
172 BF_xx = 0x04, /* future extension.*/
173 BF_EOB = 0x02, /* set if finished, but not yet free.*/
174 BF_PAUSE = 0x01, /* pause stream at buffer end.*/
175 BF_ZERO = 0x00, /* no flags (init).*/
176};
177
178/**
179* Stream Flags definitions
180*/
181enum stream_flags {
182 SF_ZERO = 0x00000000, /* no flags (stream invalid). */
183 SF_VALID = 0x10000000, /* the stream has a valid DMA_conf
184 * info (setstreamformat). */
185 SF_XRUN = 0x20000000, /* the stream is un x-run state. */
186 SF_START = 0x40000000, /* the DMA is running.*/
187 SF_ASIO = 0x80000000, /* ASIO.*/
188};
189
190
191#define MASK_SPL_COUNT_HI 0x00FFFFFF /* 4 MSBits are status bits */
192#define PSTATE_OFFSET 28 /* 4 MSBits are status bits */
193
194
195#define MASK_STREAM_HAS_MAPPING (1L << 12)
196#define MASK_STREAM_IS_ASIO (1L << 9)
197#define STREAM_FMT_OFFSET 10 /* the stream fmt bits start at the 10th
198 * bit in the command word. */
199
200#define STREAM_FMT_16b 0x02
201#define STREAM_FMT_intel 0x01
202
203#define FREQ_FIELD_OFFSET 15 /* offset of the freq field in the response
204 * word */
205
206#define BUFF_FLAGS_OFFSET 24 /* offset of the buffer flags in the
207 * response word. */
208#define MASK_DATA_SIZE 0x00FFFFFF /* this must match the field size of
209 * datasize in the buffer_t structure. */
210
211#define MASK_BUFFER_ID 0xFF /* the cancel command awaits a buffer ID,
212 * may be 0xFF for "current". */
213
214
215/* code adapted from PcxErr_e.h */
216
217/* Bits masks */
218
219#define ERROR_MASK 0x8000
220
221#define SOURCE_MASK 0x7800
222
223#define E_SOURCE_BOARD 0x4000 /* 8 >> 1 */
224#define E_SOURCE_DRV 0x2000 /* 4 >> 1 */
225#define E_SOURCE_API 0x1000 /* 2 >> 1 */
226/* Error tools */
227#define E_SOURCE_TOOLS 0x0800 /* 1 >> 1 */
228/* Error pcxaudio */
229#define E_SOURCE_AUDIO 0x1800 /* 3 >> 1 */
230/* Error virtual pcx */
231#define E_SOURCE_VPCX 0x2800 /* 5 >> 1 */
232/* Error dispatcher */
233#define E_SOURCE_DISPATCHER 0x3000 /* 6 >> 1 */
234/* Error from CobraNet firmware */
235#define E_SOURCE_COBRANET 0x3800 /* 7 >> 1 */
236
237#define E_SOURCE_USER 0x7800
238
239#define CLASS_MASK 0x0700
240
241#define CODE_MASK 0x00FF
242
243/* Bits values */
244
245/* Values for the error/warning bit */
246#define ERROR_VALUE 0x8000
247#define WARNING_VALUE 0x0000
248
249/* Class values */
250#define E_CLASS_GENERAL 0x0000
251#define E_CLASS_INVALID_CMD 0x0100
252#define E_CLASS_INVALID_STD_OBJECT 0x0200
253#define E_CLASS_RSRC_IMPOSSIBLE 0x0300
254#define E_CLASS_WRONG_CONTEXT 0x0400
255#define E_CLASS_BAD_SPECIFIC_PARAMETER 0x0500
256#define E_CLASS_REAL_TIME_ERROR 0x0600
257#define E_CLASS_DIRECTSHOW 0x0700
258#define E_CLASS_FREE 0x0700
259
260
261/* Complete DRV error code for the general class */
262#define ED_GN (ERROR_VALUE | E_SOURCE_DRV | E_CLASS_GENERAL)
263#define ED_CONCURRENCY (ED_GN | 0x01)
264#define ED_DSP_CRASHED (ED_GN | 0x02)
265#define ED_UNKNOWN_BOARD (ED_GN | 0x03)
266#define ED_NOT_INSTALLED (ED_GN | 0x04)
267#define ED_CANNOT_OPEN_SVC_MANAGER (ED_GN | 0x05)
268#define ED_CANNOT_READ_REGISTRY (ED_GN | 0x06)
269#define ED_DSP_VERSION_MISMATCH (ED_GN | 0x07)
270#define ED_UNAVAILABLE_FEATURE (ED_GN | 0x08)
271#define ED_CANCELLED (ED_GN | 0x09)
272#define ED_NO_RESPONSE_AT_IRQA (ED_GN | 0x10)
273#define ED_INVALID_ADDRESS (ED_GN | 0x11)
274#define ED_DSP_CORRUPTED (ED_GN | 0x12)
275#define ED_PENDING_OPERATION (ED_GN | 0x13)
276#define ED_NET_ALLOCATE_MEMORY_IMPOSSIBLE (ED_GN | 0x14)
277#define ED_NET_REGISTER_ERROR (ED_GN | 0x15)
278#define ED_NET_THREAD_ERROR (ED_GN | 0x16)
279#define ED_NET_OPEN_ERROR (ED_GN | 0x17)
280#define ED_NET_CLOSE_ERROR (ED_GN | 0x18)
281#define ED_NET_NO_MORE_PACKET (ED_GN | 0x19)
282#define ED_NET_NO_MORE_BUFFER (ED_GN | 0x1A)
283#define ED_NET_SEND_ERROR (ED_GN | 0x1B)
284#define ED_NET_RECEIVE_ERROR (ED_GN | 0x1C)
285#define ED_NET_WRONG_MSG_SIZE (ED_GN | 0x1D)
286#define ED_NET_WAIT_ERROR (ED_GN | 0x1E)
287#define ED_NET_EEPROM_ERROR (ED_GN | 0x1F)
288#define ED_INVALID_RS232_COM_NUMBER (ED_GN | 0x20)
289#define ED_INVALID_RS232_INIT (ED_GN | 0x21)
290#define ED_FILE_ERROR (ED_GN | 0x22)
291#define ED_INVALID_GPIO_CMD (ED_GN | 0x23)
292#define ED_RS232_ALREADY_OPENED (ED_GN | 0x24)
293#define ED_RS232_NOT_OPENED (ED_GN | 0x25)
294#define ED_GPIO_ALREADY_OPENED (ED_GN | 0x26)
295#define ED_GPIO_NOT_OPENED (ED_GN | 0x27)
296#define ED_REGISTRY_ERROR (ED_GN | 0x28) /* <- NCX */
297#define ED_INVALID_SERVICE (ED_GN | 0x29) /* <- NCX */
298
299#define ED_READ_FILE_ALREADY_OPENED (ED_GN | 0x2a) /* <- Decalage
300 * pour RCX
301 * (old 0x28)
302 * */
303#define ED_READ_FILE_INVALID_COMMAND (ED_GN | 0x2b) /* ~ */
304#define ED_READ_FILE_INVALID_PARAMETER (ED_GN | 0x2c) /* ~ */
305#define ED_READ_FILE_ALREADY_CLOSED (ED_GN | 0x2d) /* ~ */
306#define ED_READ_FILE_NO_INFORMATION (ED_GN | 0x2e) /* ~ */
307#define ED_READ_FILE_INVALID_HANDLE (ED_GN | 0x2f) /* ~ */
308#define ED_READ_FILE_END_OF_FILE (ED_GN | 0x30) /* ~ */
309#define ED_READ_FILE_ERROR (ED_GN | 0x31) /* ~ */
310
311#define ED_DSP_CRASHED_EXC_DSPSTACK_OVERFLOW (ED_GN | 0x32) /* <- Decalage pour
312 * PCX (old 0x14) */
313#define ED_DSP_CRASHED_EXC_SYSSTACK_OVERFLOW (ED_GN | 0x33) /* ~ */
314#define ED_DSP_CRASHED_EXC_ILLEGAL (ED_GN | 0x34) /* ~ */
315#define ED_DSP_CRASHED_EXC_TIMER_REENTRY (ED_GN | 0x35) /* ~ */
316#define ED_DSP_CRASHED_EXC_FATAL_ERROR (ED_GN | 0x36) /* ~ */
317
318#define ED_FLASH_PCCARD_NOT_PRESENT (ED_GN | 0x37)
319
320#define ED_NO_CURRENT_CLOCK (ED_GN | 0x38)
321
322/* Complete DRV error code for real time class */
323#define ED_RT (ERROR_VALUE | E_SOURCE_DRV | E_CLASS_REAL_TIME_ERROR)
324#define ED_DSP_TIMED_OUT (ED_RT | 0x01)
325#define ED_DSP_CHK_TIMED_OUT (ED_RT | 0x02)
326#define ED_STREAM_OVERRUN (ED_RT | 0x03)
327#define ED_DSP_BUSY (ED_RT | 0x04)
328#define ED_DSP_SEMAPHORE_TIME_OUT (ED_RT | 0x05)
329#define ED_BOARD_TIME_OUT (ED_RT | 0x06)
330#define ED_XILINX_ERROR (ED_RT | 0x07)
331#define ED_COBRANET_ITF_NOT_RESPONDING (ED_RT | 0x08)
332
333/* Complete BOARD error code for the invaid standard object class */
334#define EB_ISO (ERROR_VALUE | E_SOURCE_BOARD | \
335 E_CLASS_INVALID_STD_OBJECT)
336#define EB_INVALID_EFFECT (EB_ISO | 0x00)
337#define EB_INVALID_PIPE (EB_ISO | 0x40)
338#define EB_INVALID_STREAM (EB_ISO | 0x80)
339#define EB_INVALID_AUDIO (EB_ISO | 0xC0)
340
341/* Complete BOARD error code for impossible resource allocation class */
342#define EB_RI (ERROR_VALUE | E_SOURCE_BOARD | E_CLASS_RSRC_IMPOSSIBLE)
343#define EB_ALLOCATE_ALL_STREAM_TRANSFERT_BUFFERS_IMPOSSIBLE (EB_RI | 0x01)
344#define EB_ALLOCATE_PIPE_SAMPLE_BUFFER_IMPOSSIBLE (EB_RI | 0x02)
345
346#define EB_ALLOCATE_MEM_STREAM_IMPOSSIBLE \
347 EB_ALLOCATE_ALL_STREAM_TRANSFERT_BUFFERS_IMPOSSIBLE
348#define EB_ALLOCATE_MEM_PIPE_IMPOSSIBLE \
349 EB_ALLOCATE_PIPE_SAMPLE_BUFFER_IMPOSSIBLE
350
351#define EB_ALLOCATE_DIFFERED_CMD_IMPOSSIBLE (EB_RI | 0x03)
352#define EB_TOO_MANY_DIFFERED_CMD (EB_RI | 0x04)
353#define EB_RBUFFERS_TABLE_OVERFLOW (EB_RI | 0x05)
354#define EB_ALLOCATE_EFFECTS_IMPOSSIBLE (EB_RI | 0x08)
355#define EB_ALLOCATE_EFFECT_POS_IMPOSSIBLE (EB_RI | 0x09)
356#define EB_RBUFFER_NOT_AVAILABLE (EB_RI | 0x0A)
357#define EB_ALLOCATE_CONTEXT_LIII_IMPOSSIBLE (EB_RI | 0x0B)
358#define EB_STATUS_DIALOG_IMPOSSIBLE (EB_RI | 0x1D)
359#define EB_CONTROL_CMD_IMPOSSIBLE (EB_RI | 0x1E)
360#define EB_STATUS_SEND_IMPOSSIBLE (EB_RI | 0x1F)
361#define EB_ALLOCATE_PIPE_IMPOSSIBLE (EB_RI | 0x40)
362#define EB_ALLOCATE_STREAM_IMPOSSIBLE (EB_RI | 0x80)
363#define EB_ALLOCATE_AUDIO_IMPOSSIBLE (EB_RI | 0xC0)
364
365/* Complete BOARD error code for wrong call context class */
366#define EB_WCC (ERROR_VALUE | E_SOURCE_BOARD | E_CLASS_WRONG_CONTEXT)
367#define EB_CMD_REFUSED (EB_WCC | 0x00)
368#define EB_START_STREAM_REFUSED (EB_WCC | 0xFC)
369#define EB_SPC_REFUSED (EB_WCC | 0xFD)
370#define EB_CSN_REFUSED (EB_WCC | 0xFE)
371#define EB_CSE_REFUSED (EB_WCC | 0xFF)
372
373
374
375
376#endif /* LX_DEFS_H */
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index c262049961e1..3b5ca70c9d4d 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -487,10 +487,14 @@ static int oxygen_hw_free(struct snd_pcm_substream *substream)
487{ 487{
488 struct oxygen *chip = snd_pcm_substream_chip(substream); 488 struct oxygen *chip = snd_pcm_substream_chip(substream);
489 unsigned int channel = oxygen_substream_channel(substream); 489 unsigned int channel = oxygen_substream_channel(substream);
490 unsigned int channel_mask = 1 << channel;
490 491
491 spin_lock_irq(&chip->reg_lock); 492 spin_lock_irq(&chip->reg_lock);
492 chip->interrupt_mask &= ~(1 << channel); 493 chip->interrupt_mask &= ~channel_mask;
493 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask); 494 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
495
496 oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
497 oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
494 spin_unlock_irq(&chip->reg_lock); 498 spin_unlock_irq(&chip->reg_lock);
495 499
496 return snd_pcm_lib_free_pages(substream); 500 return snd_pcm_lib_free_pages(substream);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index bc5ce11c8b14..bf971f7cfdc6 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -113,8 +113,8 @@
113 */ 113 */
114 114
115/* 115/*
116 * Xonar Essence STX 116 * Xonar Essence ST (Deluxe)/STX
117 * ----------------- 117 * -----------------------------
118 * 118 *
119 * CMI8788: 119 * CMI8788:
120 * 120 *
@@ -180,6 +180,8 @@ enum {
180 MODEL_DX, 180 MODEL_DX,
181 MODEL_HDAV, /* without daughterboard */ 181 MODEL_HDAV, /* without daughterboard */
182 MODEL_HDAV_H6, /* with H6 daughterboard */ 182 MODEL_HDAV_H6, /* with H6 daughterboard */
183 MODEL_ST,
184 MODEL_ST_H6,
183 MODEL_STX, 185 MODEL_STX,
184}; 186};
185 187
@@ -188,8 +190,10 @@ static struct pci_device_id xonar_ids[] __devinitdata = {
188 { OXYGEN_PCI_SUBID(0x1043, 0x8275), .driver_data = MODEL_DX }, 190 { OXYGEN_PCI_SUBID(0x1043, 0x8275), .driver_data = MODEL_DX },
189 { OXYGEN_PCI_SUBID(0x1043, 0x82b7), .driver_data = MODEL_D2X }, 191 { OXYGEN_PCI_SUBID(0x1043, 0x82b7), .driver_data = MODEL_D2X },
190 { OXYGEN_PCI_SUBID(0x1043, 0x8314), .driver_data = MODEL_HDAV }, 192 { OXYGEN_PCI_SUBID(0x1043, 0x8314), .driver_data = MODEL_HDAV },
193 { OXYGEN_PCI_SUBID(0x1043, 0x8327), .driver_data = MODEL_DX },
191 { OXYGEN_PCI_SUBID(0x1043, 0x834f), .driver_data = MODEL_D1 }, 194 { OXYGEN_PCI_SUBID(0x1043, 0x834f), .driver_data = MODEL_D1 },
192 { OXYGEN_PCI_SUBID(0x1043, 0x835c), .driver_data = MODEL_STX }, 195 { OXYGEN_PCI_SUBID(0x1043, 0x835c), .driver_data = MODEL_STX },
196 { OXYGEN_PCI_SUBID(0x1043, 0x835d), .driver_data = MODEL_ST },
193 { OXYGEN_PCI_SUBID_BROKEN_EEPROM }, 197 { OXYGEN_PCI_SUBID_BROKEN_EEPROM },
194 { } 198 { }
195}; 199};
@@ -210,9 +214,9 @@ MODULE_DEVICE_TABLE(pci, xonar_ids);
210#define GPIO_DX_FRONT_PANEL 0x0002 214#define GPIO_DX_FRONT_PANEL 0x0002
211#define GPIO_DX_INPUT_ROUTE 0x0100 215#define GPIO_DX_INPUT_ROUTE 0x0100
212 216
213#define GPIO_HDAV_DB_MASK 0x0030 217#define GPIO_DB_MASK 0x0030
214#define GPIO_HDAV_DB_H6 0x0000 218#define GPIO_DB_H6 0x0000
215#define GPIO_HDAV_DB_XX 0x0020 219#define GPIO_DB_XX 0x0020
216 220
217#define GPIO_ST_HP_REAR 0x0002 221#define GPIO_ST_HP_REAR 0x0002
218#define GPIO_ST_HP 0x0080 222#define GPIO_ST_HP 0x0080
@@ -530,7 +534,7 @@ static void xonar_hdav_init(struct oxygen *chip)
530 snd_component_add(chip->card, "CS5381"); 534 snd_component_add(chip->card, "CS5381");
531} 535}
532 536
533static void xonar_stx_init(struct oxygen *chip) 537static void xonar_st_init(struct oxygen *chip)
534{ 538{
535 struct xonar_data *data = chip->model_data; 539 struct xonar_data *data = chip->model_data;
536 540
@@ -539,12 +543,11 @@ static void xonar_stx_init(struct oxygen *chip)
539 OXYGEN_2WIRE_INTERRUPT_MASK | 543 OXYGEN_2WIRE_INTERRUPT_MASK |
540 OXYGEN_2WIRE_SPEED_FAST); 544 OXYGEN_2WIRE_SPEED_FAST);
541 545
546 if (chip->model.private_data == MODEL_ST_H6)
547 chip->model.dac_channels = 8;
542 data->anti_pop_delay = 100; 548 data->anti_pop_delay = 100;
543 data->dacs = 1; 549 data->dacs = chip->model.private_data == MODEL_ST_H6 ? 4 : 1;
544 data->output_enable_bit = GPIO_DX_OUTPUT_ENABLE; 550 data->output_enable_bit = GPIO_DX_OUTPUT_ENABLE;
545 data->ext_power_reg = OXYGEN_GPI_DATA;
546 data->ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
547 data->ext_power_bit = GPI_DX_EXT_POWER;
548 data->pcm1796_oversampling = PCM1796_OS_64; 551 data->pcm1796_oversampling = PCM1796_OS_64;
549 552
550 pcm1796_init(chip); 553 pcm1796_init(chip);
@@ -560,6 +563,17 @@ static void xonar_stx_init(struct oxygen *chip)
560 snd_component_add(chip->card, "CS5381"); 563 snd_component_add(chip->card, "CS5381");
561} 564}
562 565
566static void xonar_stx_init(struct oxygen *chip)
567{
568 struct xonar_data *data = chip->model_data;
569
570 data->ext_power_reg = OXYGEN_GPI_DATA;
571 data->ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
572 data->ext_power_bit = GPI_DX_EXT_POWER;
573
574 xonar_st_init(chip);
575}
576
563static void xonar_disable_output(struct oxygen *chip) 577static void xonar_disable_output(struct oxygen *chip)
564{ 578{
565 struct xonar_data *data = chip->model_data; 579 struct xonar_data *data = chip->model_data;
@@ -1021,7 +1035,8 @@ static const struct oxygen_model model_xonar_hdav = {
1021 .model_data_size = sizeof(struct xonar_data), 1035 .model_data_size = sizeof(struct xonar_data),
1022 .device_config = PLAYBACK_0_TO_I2S | 1036 .device_config = PLAYBACK_0_TO_I2S |
1023 PLAYBACK_1_TO_SPDIF | 1037 PLAYBACK_1_TO_SPDIF |
1024 CAPTURE_0_FROM_I2S_2, 1038 CAPTURE_0_FROM_I2S_2 |
1039 CAPTURE_1_FROM_SPDIF,
1025 .dac_channels = 8, 1040 .dac_channels = 8,
1026 .dac_volume_min = 255 - 2*60, 1041 .dac_volume_min = 255 - 2*60,
1027 .dac_volume_max = 255, 1042 .dac_volume_max = 255,
@@ -1034,7 +1049,7 @@ static const struct oxygen_model model_xonar_hdav = {
1034static const struct oxygen_model model_xonar_st = { 1049static const struct oxygen_model model_xonar_st = {
1035 .longname = "Asus Virtuoso 100", 1050 .longname = "Asus Virtuoso 100",
1036 .chip = "AV200", 1051 .chip = "AV200",
1037 .init = xonar_stx_init, 1052 .init = xonar_st_init,
1038 .control_filter = xonar_st_control_filter, 1053 .control_filter = xonar_st_control_filter,
1039 .mixer_init = xonar_st_mixer_init, 1054 .mixer_init = xonar_st_mixer_init,
1040 .cleanup = xonar_st_cleanup, 1055 .cleanup = xonar_st_cleanup,
@@ -1067,6 +1082,7 @@ static int __devinit get_xonar_model(struct oxygen *chip,
1067 [MODEL_D2] = &model_xonar_d2, 1082 [MODEL_D2] = &model_xonar_d2,
1068 [MODEL_D2X] = &model_xonar_d2, 1083 [MODEL_D2X] = &model_xonar_d2,
1069 [MODEL_HDAV] = &model_xonar_hdav, 1084 [MODEL_HDAV] = &model_xonar_hdav,
1085 [MODEL_ST] = &model_xonar_st,
1070 [MODEL_STX] = &model_xonar_st, 1086 [MODEL_STX] = &model_xonar_st,
1071 }; 1087 };
1072 static const char *const names[] = { 1088 static const char *const names[] = {
@@ -1076,6 +1092,8 @@ static int __devinit get_xonar_model(struct oxygen *chip,
1076 [MODEL_D2X] = "Xonar D2X", 1092 [MODEL_D2X] = "Xonar D2X",
1077 [MODEL_HDAV] = "Xonar HDAV1.3", 1093 [MODEL_HDAV] = "Xonar HDAV1.3",
1078 [MODEL_HDAV_H6] = "Xonar HDAV1.3+H6", 1094 [MODEL_HDAV_H6] = "Xonar HDAV1.3+H6",
1095 [MODEL_ST] = "Xonar Essence ST",
1096 [MODEL_ST_H6] = "Xonar Essence ST+H6",
1079 [MODEL_STX] = "Xonar Essence STX", 1097 [MODEL_STX] = "Xonar Essence STX",
1080 }; 1098 };
1081 unsigned int model = id->driver_data; 1099 unsigned int model = id->driver_data;
@@ -1092,21 +1110,27 @@ static int __devinit get_xonar_model(struct oxygen *chip,
1092 chip->model.init = xonar_dx_init; 1110 chip->model.init = xonar_dx_init;
1093 break; 1111 break;
1094 case MODEL_HDAV: 1112 case MODEL_HDAV:
1095 oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, 1113 oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK);
1096 GPIO_HDAV_DB_MASK); 1114 switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DB_MASK) {
1097 switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & 1115 case GPIO_DB_H6:
1098 GPIO_HDAV_DB_MASK) {
1099 case GPIO_HDAV_DB_H6:
1100 model = MODEL_HDAV_H6; 1116 model = MODEL_HDAV_H6;
1101 break; 1117 break;
1102 case GPIO_HDAV_DB_XX: 1118 case GPIO_DB_XX:
1103 snd_printk(KERN_ERR "unknown daughterboard\n"); 1119 snd_printk(KERN_ERR "unknown daughterboard\n");
1104 return -ENODEV; 1120 return -ENODEV;
1105 } 1121 }
1106 break; 1122 break;
1123 case MODEL_ST:
1124 oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK);
1125 switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DB_MASK) {
1126 case GPIO_DB_H6:
1127 model = MODEL_ST_H6;
1128 break;
1129 }
1130 break;
1107 case MODEL_STX: 1131 case MODEL_STX:
1108 oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, 1132 chip->model.init = xonar_stx_init;
1109 GPIO_HDAV_DB_MASK); 1133 oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK);
1110 break; 1134 break;
1111 } 1135 }
1112 1136
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index e51a5ef1954d..235a71e5ac8d 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -507,41 +507,19 @@ static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip);
507 */ 507 */
508 508
509static struct pci_device_id snd_riptide_ids[] = { 509static struct pci_device_id snd_riptide_ids[] = {
510 { 510 { PCI_DEVICE(0x127a, 0x4310) },
511 .vendor = 0x127a,.device = 0x4310, 511 { PCI_DEVICE(0x127a, 0x4320) },
512 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID, 512 { PCI_DEVICE(0x127a, 0x4330) },
513 }, 513 { PCI_DEVICE(0x127a, 0x4340) },
514 {
515 .vendor = 0x127a,.device = 0x4320,
516 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
517 },
518 {
519 .vendor = 0x127a,.device = 0x4330,
520 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
521 },
522 {
523 .vendor = 0x127a,.device = 0x4340,
524 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
525 },
526 {0,}, 514 {0,},
527}; 515};
528 516
529#ifdef SUPPORT_JOYSTICK 517#ifdef SUPPORT_JOYSTICK
530static struct pci_device_id snd_riptide_joystick_ids[] __devinitdata = { 518static struct pci_device_id snd_riptide_joystick_ids[] __devinitdata = {
531 { 519 { PCI_DEVICE(0x127a, 0x4312) },
532 .vendor = 0x127a,.device = 0x4312, 520 { PCI_DEVICE(0x127a, 0x4322) },
533 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID, 521 { PCI_DEVICE(0x127a, 0x4332) },
534 }, 522 { PCI_DEVICE(0x127a, 0x4342) },
535 {
536 .vendor = 0x127a,.device = 0x4322,
537 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
538 },
539 {.vendor = 0x127a,.device = 0x4332,
540 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
541 },
542 {.vendor = 0x127a,.device = 0x4342,
543 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
544 },
545 {0,}, 523 {0,},
546}; 524};
547#endif 525#endif
@@ -1209,12 +1187,79 @@ static int riptide_resume(struct pci_dev *pci)
1209} 1187}
1210#endif 1188#endif
1211 1189
1190static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
1191{
1192 union firmware_version firmware = { .ret = CMDRET_ZERO };
1193 int i, timeout, err;
1194
1195 for (i = 0; i < 2; i++) {
1196 WRITE_PORT_ULONG(cif->hwport->port[i].data1, 0);
1197 WRITE_PORT_ULONG(cif->hwport->port[i].data2, 0);
1198 }
1199 SET_GRESET(cif->hwport);
1200 udelay(100);
1201 UNSET_GRESET(cif->hwport);
1202 udelay(100);
1203
1204 for (timeout = 100000; --timeout; udelay(10)) {
1205 if (IS_READY(cif->hwport) && !IS_GERR(cif->hwport))
1206 break;
1207 }
1208 if (!timeout) {
1209 snd_printk(KERN_ERR
1210 "Riptide: device not ready, audio status: 0x%x "
1211 "ready: %d gerr: %d\n",
1212 READ_AUDIO_STATUS(cif->hwport),
1213 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1214 return -EIO;
1215 } else {
1216 snd_printdd
1217 ("Riptide: audio status: 0x%x ready: %d gerr: %d\n",
1218 READ_AUDIO_STATUS(cif->hwport),
1219 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1220 }
1221
1222 SEND_GETV(cif, &firmware.ret);
1223 snd_printdd("Firmware version: ASIC: %d CODEC %d AUXDSP %d PROG %d\n",
1224 firmware.firmware.ASIC, firmware.firmware.CODEC,
1225 firmware.firmware.AUXDSP, firmware.firmware.PROG);
1226
1227 for (i = 0; i < FIRMWARE_VERSIONS; i++) {
1228 if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
1229 break;
1230 }
1231 if (i >= FIRMWARE_VERSIONS)
1232 return 0; /* no match */
1233
1234 if (!chip)
1235 return 1; /* OK */
1236
1237 snd_printdd("Writing Firmware\n");
1238 if (!chip->fw_entry) {
1239 err = request_firmware(&chip->fw_entry, "riptide.hex",
1240 &chip->pci->dev);
1241 if (err) {
1242 snd_printk(KERN_ERR
1243 "Riptide: Firmware not available %d\n", err);
1244 return -EIO;
1245 }
1246 }
1247 err = loadfirmware(cif, chip->fw_entry->data, chip->fw_entry->size);
1248 if (err) {
1249 snd_printk(KERN_ERR
1250 "Riptide: Could not load firmware %d\n", err);
1251 return err;
1252 }
1253
1254 chip->firmware = firmware;
1255
1256 return 1; /* OK */
1257}
1258
1212static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip) 1259static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip)
1213{ 1260{
1214 int timeout, tries;
1215 union cmdret rptr = CMDRET_ZERO; 1261 union cmdret rptr = CMDRET_ZERO;
1216 union firmware_version firmware; 1262 int err, tries;
1217 int i, j, err, has_firmware;
1218 1263
1219 if (!cif) 1264 if (!cif)
1220 return -EINVAL; 1265 return -EINVAL;
@@ -1227,75 +1272,11 @@ static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip)
1227 cif->is_reset = 0; 1272 cif->is_reset = 0;
1228 1273
1229 tries = RESET_TRIES; 1274 tries = RESET_TRIES;
1230 has_firmware = 0; 1275 do {
1231 while (has_firmware == 0 && tries-- > 0) { 1276 err = try_to_load_firmware(cif, chip);
1232 for (i = 0; i < 2; i++) { 1277 if (err < 0)
1233 WRITE_PORT_ULONG(cif->hwport->port[i].data1, 0); 1278 return err;
1234 WRITE_PORT_ULONG(cif->hwport->port[i].data2, 0); 1279 } while (!err && --tries);
1235 }
1236 SET_GRESET(cif->hwport);
1237 udelay(100);
1238 UNSET_GRESET(cif->hwport);
1239 udelay(100);
1240
1241 for (timeout = 100000; --timeout; udelay(10)) {
1242 if (IS_READY(cif->hwport) && !IS_GERR(cif->hwport))
1243 break;
1244 }
1245 if (timeout == 0) {
1246 snd_printk(KERN_ERR
1247 "Riptide: device not ready, audio status: 0x%x ready: %d gerr: %d\n",
1248 READ_AUDIO_STATUS(cif->hwport),
1249 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1250 return -EIO;
1251 } else {
1252 snd_printdd
1253 ("Riptide: audio status: 0x%x ready: %d gerr: %d\n",
1254 READ_AUDIO_STATUS(cif->hwport),
1255 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1256 }
1257
1258 SEND_GETV(cif, &rptr);
1259 for (i = 0; i < 4; i++)
1260 firmware.ret.retwords[i] = rptr.retwords[i];
1261
1262 snd_printdd
1263 ("Firmware version: ASIC: %d CODEC %d AUXDSP %d PROG %d\n",
1264 firmware.firmware.ASIC, firmware.firmware.CODEC,
1265 firmware.firmware.AUXDSP, firmware.firmware.PROG);
1266
1267 for (j = 0; j < FIRMWARE_VERSIONS; j++) {
1268 has_firmware = 1;
1269 for (i = 0; i < 4; i++) {
1270 if (firmware_versions[j].ret.retwords[i] !=
1271 firmware.ret.retwords[i])
1272 has_firmware = 0;
1273 }
1274 if (has_firmware)
1275 break;
1276 }
1277
1278 if (chip != NULL && has_firmware == 0) {
1279 snd_printdd("Writing Firmware\n");
1280 if (!chip->fw_entry) {
1281 if ((err =
1282 request_firmware(&chip->fw_entry,
1283 "riptide.hex",
1284 &chip->pci->dev)) != 0) {
1285 snd_printk(KERN_ERR
1286 "Riptide: Firmware not available %d\n",
1287 err);
1288 return -EIO;
1289 }
1290 }
1291 err = loadfirmware(cif, chip->fw_entry->data,
1292 chip->fw_entry->size);
1293 if (err)
1294 snd_printk(KERN_ERR
1295 "Riptide: Could not load firmware %d\n",
1296 err);
1297 }
1298 }
1299 1280
1300 SEND_SACR(cif, 0, AC97_RESET); 1281 SEND_SACR(cif, 0, AC97_RESET);
1301 SEND_RACR(cif, AC97_RESET, &rptr); 1282 SEND_RACR(cif, AC97_RESET, &rptr);
@@ -1337,11 +1318,6 @@ static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip)
1337 SET_AIE(cif->hwport); 1318 SET_AIE(cif->hwport);
1338 SET_AIACK(cif->hwport); 1319 SET_AIACK(cif->hwport);
1339 cif->is_reset = 1; 1320 cif->is_reset = 1;
1340 if (chip) {
1341 for (i = 0; i < 4; i++)
1342 chip->firmware.ret.retwords[i] =
1343 firmware.ret.retwords[i];
1344 }
1345 1321
1346 return 0; 1322 return 0;
1347} 1323}
@@ -2038,14 +2014,12 @@ static int __devinit snd_riptide_mixer(struct snd_riptide *chip)
2038} 2014}
2039 2015
2040#ifdef SUPPORT_JOYSTICK 2016#ifdef SUPPORT_JOYSTICK
2041static int have_joystick;
2042static struct pci_dev *riptide_gameport_pci;
2043static struct gameport *riptide_gameport;
2044 2017
2045static int __devinit 2018static int __devinit
2046snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id) 2019snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
2047{ 2020{
2048 static int dev; 2021 static int dev;
2022 struct gameport *gameport;
2049 2023
2050 if (dev >= SNDRV_CARDS) 2024 if (dev >= SNDRV_CARDS)
2051 return -ENODEV; 2025 return -ENODEV;
@@ -2054,36 +2028,33 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
2054 return -ENOENT; 2028 return -ENOENT;
2055 } 2029 }
2056 2030
2057 if (joystick_port[dev]) { 2031 if (!joystick_port[dev++])
2058 riptide_gameport = gameport_allocate_port(); 2032 return 0;
2059 if (riptide_gameport) { 2033
2060 if (!request_region 2034 gameport = gameport_allocate_port();
2061 (joystick_port[dev], 8, "Riptide gameport")) { 2035 if (!gameport)
2062 snd_printk(KERN_WARNING 2036 return -ENOMEM;
2063 "Riptide: cannot grab gameport 0x%x\n", 2037 if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
2064 joystick_port[dev]); 2038 snd_printk(KERN_WARNING
2065 gameport_free_port(riptide_gameport); 2039 "Riptide: cannot grab gameport 0x%x\n",
2066 riptide_gameport = NULL; 2040 joystick_port[dev]);
2067 } else { 2041 gameport_free_port(gameport);
2068 riptide_gameport_pci = pci; 2042 return -EBUSY;
2069 riptide_gameport->io = joystick_port[dev];
2070 gameport_register_port(riptide_gameport);
2071 }
2072 }
2073 } 2043 }
2074 dev++; 2044
2045 gameport->io = joystick_port[dev];
2046 gameport_register_port(gameport);
2047 pci_set_drvdata(pci, gameport);
2075 return 0; 2048 return 0;
2076} 2049}
2077 2050
2078static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci) 2051static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci)
2079{ 2052{
2080 if (riptide_gameport) { 2053 struct gameport *gameport = pci_get_drvdata(pci);
2081 if (riptide_gameport_pci == pci) { 2054 if (gameport) {
2082 release_region(riptide_gameport->io, 8); 2055 release_region(gameport->io, 8);
2083 riptide_gameport_pci = NULL; 2056 gameport_unregister_port(gameport);
2084 gameport_unregister_port(riptide_gameport); 2057 pci_set_drvdata(pci, NULL);
2085 riptide_gameport = NULL;
2086 }
2087 } 2058 }
2088} 2059}
2089#endif 2060#endif
@@ -2094,8 +2065,8 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2094 static int dev; 2065 static int dev;
2095 struct snd_card *card; 2066 struct snd_card *card;
2096 struct snd_riptide *chip; 2067 struct snd_riptide *chip;
2097 unsigned short addr; 2068 unsigned short val;
2098 int err = 0; 2069 int err;
2099 2070
2100 if (dev >= SNDRV_CARDS) 2071 if (dev >= SNDRV_CARDS)
2101 return -ENODEV; 2072 return -ENODEV;
@@ -2107,60 +2078,63 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2107 err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); 2078 err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
2108 if (err < 0) 2079 if (err < 0)
2109 return err; 2080 return err;
2110 if ((err = snd_riptide_create(card, pci, &chip)) < 0) { 2081 err = snd_riptide_create(card, pci, &chip);
2111 snd_card_free(card); 2082 if (err < 0)
2112 return err; 2083 goto error;
2113 }
2114 card->private_data = chip; 2084 card->private_data = chip;
2115 if ((err = snd_riptide_pcm(chip, 0, NULL)) < 0) { 2085 err = snd_riptide_pcm(chip, 0, NULL);
2116 snd_card_free(card); 2086 if (err < 0)
2117 return err; 2087 goto error;
2118 } 2088 err = snd_riptide_mixer(chip);
2119 if ((err = snd_riptide_mixer(chip)) < 0) { 2089 if (err < 0)
2120 snd_card_free(card); 2090 goto error;
2121 return err; 2091
2122 } 2092 val = LEGACY_ENABLE_ALL;
2123 pci_write_config_word(chip->pci, PCI_EXT_Legacy_Mask, LEGACY_ENABLE_ALL 2093 if (opl3_port[dev])
2124 | (opl3_port[dev] ? LEGACY_ENABLE_FM : 0) 2094 val |= LEGACY_ENABLE_FM;
2125#ifdef SUPPORT_JOYSTICK 2095#ifdef SUPPORT_JOYSTICK
2126 | (joystick_port[dev] ? LEGACY_ENABLE_GAMEPORT : 2096 if (joystick_port[dev])
2127 0) 2097 val |= LEGACY_ENABLE_GAMEPORT;
2128#endif 2098#endif
2129 | (mpu_port[dev] 2099 if (mpu_port[dev])
2130 ? (LEGACY_ENABLE_MPU_INT | LEGACY_ENABLE_MPU) : 2100 val |= LEGACY_ENABLE_MPU_INT | LEGACY_ENABLE_MPU;
2131 0) 2101 val |= (chip->irq << 4) & 0xf0;
2132 | ((chip->irq << 4) & 0xF0)); 2102 pci_write_config_word(chip->pci, PCI_EXT_Legacy_Mask, val);
2133 if ((addr = mpu_port[dev]) != 0) { 2103 if (mpu_port[dev]) {
2134 pci_write_config_word(chip->pci, PCI_EXT_MPU_Base, addr); 2104 val = mpu_port[dev];
2135 if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_RIPTIDE, 2105 pci_write_config_word(chip->pci, PCI_EXT_MPU_Base, val);
2136 addr, 0, chip->irq, 0, 2106 err = snd_mpu401_uart_new(card, 0, MPU401_HW_RIPTIDE,
2137 &chip->rmidi)) < 0) 2107 val, 0, chip->irq, 0,
2108 &chip->rmidi);
2109 if (err < 0)
2138 snd_printk(KERN_WARNING 2110 snd_printk(KERN_WARNING
2139 "Riptide: Can't Allocate MPU at 0x%x\n", 2111 "Riptide: Can't Allocate MPU at 0x%x\n",
2140 addr); 2112 val);
2141 else 2113 else
2142 chip->mpuaddr = addr; 2114 chip->mpuaddr = val;
2143 } 2115 }
2144 if ((addr = opl3_port[dev]) != 0) { 2116 if (opl3_port[dev]) {
2145 pci_write_config_word(chip->pci, PCI_EXT_FM_Base, addr); 2117 val = opl3_port[dev];
2146 if ((err = snd_opl3_create(card, addr, addr + 2, 2118 pci_write_config_word(chip->pci, PCI_EXT_FM_Base, val);
2147 OPL3_HW_RIPTIDE, 0, 2119 err = snd_opl3_create(card, val, val + 2,
2148 &chip->opl3)) < 0) 2120 OPL3_HW_RIPTIDE, 0, &chip->opl3);
2121 if (err < 0)
2149 snd_printk(KERN_WARNING 2122 snd_printk(KERN_WARNING
2150 "Riptide: Can't Allocate OPL3 at 0x%x\n", 2123 "Riptide: Can't Allocate OPL3 at 0x%x\n",
2151 addr); 2124 val);
2152 else { 2125 else {
2153 chip->opladdr = addr; 2126 chip->opladdr = val;
2154 if ((err = 2127 err = snd_opl3_hwdep_new(chip->opl3, 0, 1, NULL);
2155 snd_opl3_hwdep_new(chip->opl3, 0, 1, NULL)) < 0) 2128 if (err < 0)
2156 snd_printk(KERN_WARNING 2129 snd_printk(KERN_WARNING
2157 "Riptide: Can't Allocate OPL3-HWDEP\n"); 2130 "Riptide: Can't Allocate OPL3-HWDEP\n");
2158 } 2131 }
2159 } 2132 }
2160#ifdef SUPPORT_JOYSTICK 2133#ifdef SUPPORT_JOYSTICK
2161 if ((addr = joystick_port[dev]) != 0) { 2134 if (joystick_port[dev]) {
2162 pci_write_config_word(chip->pci, PCI_EXT_Game_Base, addr); 2135 val = joystick_port[dev];
2163 chip->gameaddr = addr; 2136 pci_write_config_word(chip->pci, PCI_EXT_Game_Base, val);
2137 chip->gameaddr = val;
2164 } 2138 }
2165#endif 2139#endif
2166 2140
@@ -2178,13 +2152,16 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2178 chip->opladdr); 2152 chip->opladdr);
2179#endif 2153#endif
2180 snd_riptide_proc_init(chip); 2154 snd_riptide_proc_init(chip);
2181 if ((err = snd_card_register(card)) < 0) { 2155 err = snd_card_register(card);
2182 snd_card_free(card); 2156 if (err < 0)
2183 return err; 2157 goto error;
2184 }
2185 pci_set_drvdata(pci, card); 2158 pci_set_drvdata(pci, card);
2186 dev++; 2159 dev++;
2187 return 0; 2160 return 0;
2161
2162 error:
2163 snd_card_free(card);
2164 return err;
2188} 2165}
2189 2166
2190static void __devexit snd_card_riptide_remove(struct pci_dev *pci) 2167static void __devexit snd_card_riptide_remove(struct pci_dev *pci)
@@ -2216,14 +2193,11 @@ static struct pci_driver joystick_driver = {
2216static int __init alsa_card_riptide_init(void) 2193static int __init alsa_card_riptide_init(void)
2217{ 2194{
2218 int err; 2195 int err;
2219 if ((err = pci_register_driver(&driver)) < 0) 2196 err = pci_register_driver(&driver);
2197 if (err < 0)
2220 return err; 2198 return err;
2221#if defined(SUPPORT_JOYSTICK) 2199#if defined(SUPPORT_JOYSTICK)
2222 if (pci_register_driver(&joystick_driver) < 0) { 2200 pci_register_driver(&joystick_driver);
2223 have_joystick = 0;
2224 snd_printk(KERN_INFO "no joystick found\n");
2225 } else
2226 have_joystick = 1;
2227#endif 2201#endif
2228 return 0; 2202 return 0;
2229} 2203}
@@ -2232,8 +2206,7 @@ static void __exit alsa_card_riptide_exit(void)
2232{ 2206{
2233 pci_unregister_driver(&driver); 2207 pci_unregister_driver(&driver);
2234#if defined(SUPPORT_JOYSTICK) 2208#if defined(SUPPORT_JOYSTICK)
2235 if (have_joystick) 2209 pci_unregister_driver(&joystick_driver);
2236 pci_unregister_driver(&joystick_driver);
2237#endif 2210#endif
2238} 2211}
2239 2212
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 314e73531bd1..3da5c029f93b 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -28,6 +28,7 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/math64.h>
31 32
32#include <sound/core.h> 33#include <sound/core.h>
33#include <sound/control.h> 34#include <sound/control.h>
@@ -402,9 +403,9 @@ MODULE_FIRMWARE("digiface_firmware_rev11.bin");
402#define HDSP_DMA_AREA_BYTES ((HDSP_MAX_CHANNELS+1) * HDSP_CHANNEL_BUFFER_BYTES) 403#define HDSP_DMA_AREA_BYTES ((HDSP_MAX_CHANNELS+1) * HDSP_CHANNEL_BUFFER_BYTES)
403#define HDSP_DMA_AREA_KILOBYTES (HDSP_DMA_AREA_BYTES/1024) 404#define HDSP_DMA_AREA_KILOBYTES (HDSP_DMA_AREA_BYTES/1024)
404 405
405/* use hotplug firmeare loader? */ 406/* use hotplug firmware loader? */
406#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) 407#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
407#if !defined(HDSP_USE_HWDEP_LOADER) && !defined(CONFIG_SND_HDSP) 408#if !defined(HDSP_USE_HWDEP_LOADER)
408#define HDSP_FW_LOADER 409#define HDSP_FW_LOADER
409#endif 410#endif
410#endif 411#endif
@@ -1047,7 +1048,6 @@ static int hdsp_set_interrupt_interval(struct hdsp *s, unsigned int frames)
1047static void hdsp_set_dds_value(struct hdsp *hdsp, int rate) 1048static void hdsp_set_dds_value(struct hdsp *hdsp, int rate)
1048{ 1049{
1049 u64 n; 1050 u64 n;
1050 u32 r;
1051 1051
1052 if (rate >= 112000) 1052 if (rate >= 112000)
1053 rate /= 4; 1053 rate /= 4;
@@ -1055,7 +1055,7 @@ static void hdsp_set_dds_value(struct hdsp *hdsp, int rate)
1055 rate /= 2; 1055 rate /= 2;
1056 1056
1057 n = DDS_NUMERATOR; 1057 n = DDS_NUMERATOR;
1058 div64_32(&n, rate, &r); 1058 n = div_u64(n, rate);
1059 /* n should be less than 2^32 for being written to FREQ register */ 1059 /* n should be less than 2^32 for being written to FREQ register */
1060 snd_BUG_ON(n >> 32); 1060 snd_BUG_ON(n >> 32);
1061 /* HDSP_freqReg and HDSP_resetPointer are the same, so keep the DDS 1061 /* HDSP_freqReg and HDSP_resetPointer are the same, so keep the DDS
@@ -3097,7 +3097,6 @@ static int snd_hdsp_get_adat_sync_check(struct snd_kcontrol *kcontrol, struct sn
3097static int hdsp_dds_offset(struct hdsp *hdsp) 3097static int hdsp_dds_offset(struct hdsp *hdsp)
3098{ 3098{
3099 u64 n; 3099 u64 n;
3100 u32 r;
3101 unsigned int dds_value = hdsp->dds_value; 3100 unsigned int dds_value = hdsp->dds_value;
3102 int system_sample_rate = hdsp->system_sample_rate; 3101 int system_sample_rate = hdsp->system_sample_rate;
3103 3102
@@ -3109,7 +3108,7 @@ static int hdsp_dds_offset(struct hdsp *hdsp)
3109 * dds_value = n / rate 3108 * dds_value = n / rate
3110 * rate = n / dds_value 3109 * rate = n / dds_value
3111 */ 3110 */
3112 div64_32(&n, dds_value, &r); 3111 n = div_u64(n, dds_value);
3113 if (system_sample_rate >= 112000) 3112 if (system_sample_rate >= 112000)
3114 n *= 4; 3113 n *= 4;
3115 else if (system_sample_rate >= 56000) 3114 else if (system_sample_rate >= 56000)
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index bac2dc0c5d85..0dce331a2a3b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -29,6 +29,7 @@
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/math64.h>
32#include <asm/io.h> 33#include <asm/io.h>
33 34
34#include <sound/core.h> 35#include <sound/core.h>
@@ -831,7 +832,6 @@ static int hdspm_set_interrupt_interval(struct hdspm * s, unsigned int frames)
831static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) 832static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
832{ 833{
833 u64 n; 834 u64 n;
834 u32 r;
835 835
836 if (rate >= 112000) 836 if (rate >= 112000)
837 rate /= 4; 837 rate /= 4;
@@ -844,7 +844,7 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
844 */ 844 */
845 /* n = 104857600000000ULL; */ /* = 2^20 * 10^8 */ 845 /* n = 104857600000000ULL; */ /* = 2^20 * 10^8 */
846 n = 110100480000000ULL; /* Value checked for AES32 and MADI */ 846 n = 110100480000000ULL; /* Value checked for AES32 and MADI */
847 div64_32(&n, rate, &r); 847 n = div_u64(n, rate);
848 /* n should be less than 2^32 for being written to FREQ register */ 848 /* n should be less than 2^32 for being written to FREQ register */
849 snd_BUG_ON(n >> 32); 849 snd_BUG_ON(n >> 32);
850 hdspm_write(hdspm, HDSPM_freqReg, (u32)n); 850 hdspm_write(hdspm, HDSPM_freqReg, (u32)n);
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 80df9b1f651e..2cc0eda4f20e 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -477,7 +477,7 @@ static int snd_pmac_awacs_put_master_amp(struct snd_kcontrol *kcontrol,
477#define AMP_CH_SPK 0 477#define AMP_CH_SPK 0
478#define AMP_CH_HD 1 478#define AMP_CH_HD 1
479 479
480static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] __initdata = { 480static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] __devinitdata = {
481 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 481 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
482 .name = "PC Speaker Playback Volume", 482 .name = "PC Speaker Playback Volume",
483 .info = snd_pmac_awacs_info_volume_amp, 483 .info = snd_pmac_awacs_info_volume_amp,
@@ -514,7 +514,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] __initdata = {
514 }, 514 },
515}; 515};
516 516
517static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw __initdata = { 517static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw __devinitdata = {
518 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 518 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
519 .name = "Headphone Playback Switch", 519 .name = "Headphone Playback Switch",
520 .info = snd_pmac_boolean_stereo_info, 520 .info = snd_pmac_boolean_stereo_info,
@@ -523,7 +523,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw __initdata = {
523 .private_value = AMP_CH_HD, 523 .private_value = AMP_CH_HD,
524}; 524};
525 525
526static struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw __initdata = { 526static struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw __devinitdata = {
527 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 527 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
528 .name = "PC Speaker Playback Switch", 528 .name = "PC Speaker Playback Switch",
529 .info = snd_pmac_boolean_stereo_info, 529 .info = snd_pmac_boolean_stereo_info,
@@ -595,46 +595,46 @@ static int snd_pmac_screamer_mic_boost_put(struct snd_kcontrol *kcontrol,
595/* 595/*
596 * lists of mixer elements 596 * lists of mixer elements
597 */ 597 */
598static struct snd_kcontrol_new snd_pmac_awacs_mixers[] __initdata = { 598static struct snd_kcontrol_new snd_pmac_awacs_mixers[] __devinitdata = {
599 AWACS_SWITCH("Master Capture Switch", 1, SHIFT_LOOPTHRU, 0), 599 AWACS_SWITCH("Master Capture Switch", 1, SHIFT_LOOPTHRU, 0),
600 AWACS_VOLUME("Master Capture Volume", 0, 4, 0), 600 AWACS_VOLUME("Master Capture Volume", 0, 4, 0),
601/* AWACS_SWITCH("Unknown Playback Switch", 6, SHIFT_PAROUT0, 0), */ 601/* AWACS_SWITCH("Unknown Playback Switch", 6, SHIFT_PAROUT0, 0), */
602}; 602};
603 603
604static struct snd_kcontrol_new snd_pmac_screamer_mixers_beige[] __initdata = { 604static struct snd_kcontrol_new snd_pmac_screamer_mixers_beige[] __devinitdata = {
605 AWACS_VOLUME("Master Playback Volume", 2, 6, 1), 605 AWACS_VOLUME("Master Playback Volume", 2, 6, 1),
606 AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1), 606 AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1),
607 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), 607 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
608 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_LINE, 0), 608 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_LINE, 0),
609}; 609};
610 610
611static struct snd_kcontrol_new snd_pmac_screamer_mixers_lo[] __initdata = { 611static struct snd_kcontrol_new snd_pmac_screamer_mixers_lo[] __devinitdata = {
612 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), 612 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
613}; 613};
614 614
615static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __initdata = { 615static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __devinitdata = {
616 AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1), 616 AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1),
617 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), 617 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
618}; 618};
619 619
620static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __initdata = { 620static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __devinitdata = {
621 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), 621 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
622 AWACS_VOLUME("Master Playback Volume", 5, 6, 1), 622 AWACS_VOLUME("Master Playback Volume", 5, 6, 1),
623 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), 623 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
624 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), 624 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
625}; 625};
626 626
627static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = { 627static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __devinitdata = {
628 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), 628 AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
629 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), 629 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
630 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), 630 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
631}; 631};
632 632
633static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac5500[] __initdata = { 633static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac5500[] __devinitdata = {
634 AWACS_VOLUME("Headphone Playback Volume", 2, 6, 1), 634 AWACS_VOLUME("Headphone Playback Volume", 2, 6, 1),
635}; 635};
636 636
637static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] __initdata = { 637static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] __devinitdata = {
638 AWACS_VOLUME("Master Playback Volume", 2, 6, 1), 638 AWACS_VOLUME("Master Playback Volume", 2, 6, 1),
639 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), 639 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
640}; 640};
@@ -642,34 +642,34 @@ static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] __initdata = {
642/* FIXME: is this correct order? 642/* FIXME: is this correct order?
643 * screamer (powerbook G3 pismo) seems to have different bits... 643 * screamer (powerbook G3 pismo) seems to have different bits...
644 */ 644 */
645static struct snd_kcontrol_new snd_pmac_awacs_mixers2[] __initdata = { 645static struct snd_kcontrol_new snd_pmac_awacs_mixers2[] __devinitdata = {
646 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_LINE, 0), 646 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_LINE, 0),
647 AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_MIC, 0), 647 AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_MIC, 0),
648}; 648};
649 649
650static struct snd_kcontrol_new snd_pmac_screamer_mixers2[] __initdata = { 650static struct snd_kcontrol_new snd_pmac_screamer_mixers2[] __devinitdata = {
651 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), 651 AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
652 AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_LINE, 0), 652 AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_LINE, 0),
653}; 653};
654 654
655static struct snd_kcontrol_new snd_pmac_awacs_mixers2_pmac5500[] __initdata = { 655static struct snd_kcontrol_new snd_pmac_awacs_mixers2_pmac5500[] __devinitdata = {
656 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), 656 AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
657}; 657};
658 658
659static struct snd_kcontrol_new snd_pmac_awacs_master_sw __initdata = 659static struct snd_kcontrol_new snd_pmac_awacs_master_sw __devinitdata =
660AWACS_SWITCH("Master Playback Switch", 1, SHIFT_HDMUTE, 1); 660AWACS_SWITCH("Master Playback Switch", 1, SHIFT_HDMUTE, 1);
661 661
662static struct snd_kcontrol_new snd_pmac_awacs_master_sw_imac __initdata = 662static struct snd_kcontrol_new snd_pmac_awacs_master_sw_imac __devinitdata =
663AWACS_SWITCH("Line out Playback Switch", 1, SHIFT_HDMUTE, 1); 663AWACS_SWITCH("Line out Playback Switch", 1, SHIFT_HDMUTE, 1);
664 664
665static struct snd_kcontrol_new snd_pmac_awacs_master_sw_pmac5500 __initdata = 665static struct snd_kcontrol_new snd_pmac_awacs_master_sw_pmac5500 __devinitdata =
666AWACS_SWITCH("Headphone Playback Switch", 1, SHIFT_HDMUTE, 1); 666AWACS_SWITCH("Headphone Playback Switch", 1, SHIFT_HDMUTE, 1);
667 667
668static struct snd_kcontrol_new snd_pmac_awacs_mic_boost[] __initdata = { 668static struct snd_kcontrol_new snd_pmac_awacs_mic_boost[] __devinitdata = {
669 AWACS_SWITCH("Mic Boost Capture Switch", 0, SHIFT_GAINLINE, 0), 669 AWACS_SWITCH("Mic Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
670}; 670};
671 671
672static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] __initdata = { 672static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] __devinitdata = {
673 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 673 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
674 .name = "Mic Boost Capture Volume", 674 .name = "Mic Boost Capture Volume",
675 .info = snd_pmac_screamer_mic_boost_info, 675 .info = snd_pmac_screamer_mic_boost_info,
@@ -678,34 +678,34 @@ static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] __initdata = {
678 }, 678 },
679}; 679};
680 680
681static struct snd_kcontrol_new snd_pmac_awacs_mic_boost_pmac7500[] __initdata = 681static struct snd_kcontrol_new snd_pmac_awacs_mic_boost_pmac7500[] __devinitdata =
682{ 682{
683 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0), 683 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
684}; 684};
685 685
686static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_beige[] __initdata = 686static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_beige[] __devinitdata =
687{ 687{
688 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0), 688 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
689 AWACS_SWITCH("CD Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0), 689 AWACS_SWITCH("CD Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0),
690}; 690};
691 691
692static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_imac[] __initdata = 692static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_imac[] __devinitdata =
693{ 693{
694 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0), 694 AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
695 AWACS_SWITCH("Mic Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0), 695 AWACS_SWITCH("Mic Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0),
696}; 696};
697 697
698static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __initdata = { 698static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __devinitdata = {
699 AWACS_VOLUME("PC Speaker Playback Volume", 4, 6, 1), 699 AWACS_VOLUME("PC Speaker Playback Volume", 4, 6, 1),
700}; 700};
701 701
702static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata = 702static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __devinitdata =
703AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1); 703AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1);
704 704
705static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __initdata = 705static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __devinitdata =
706AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1); 706AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1);
707 707
708static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __initdata = 708static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __devinitdata =
709AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0); 709AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0);
710 710
711 711
@@ -872,7 +872,7 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify)
872/* 872/*
873 * initialize chip 873 * initialize chip
874 */ 874 */
875int __init 875int __devinit
876snd_pmac_awacs_init(struct snd_pmac *chip) 876snd_pmac_awacs_init(struct snd_pmac *chip)
877{ 877{
878 int pm7500 = IS_PM7500; 878 int pm7500 = IS_PM7500;
diff --git a/sound/ppc/beep.c b/sound/ppc/beep.c
index 89f5c328acfe..a9d350789f55 100644
--- a/sound/ppc/beep.c
+++ b/sound/ppc/beep.c
@@ -215,7 +215,7 @@ static struct snd_kcontrol_new snd_pmac_beep_mixer = {
215}; 215};
216 216
217/* Initialize beep stuff */ 217/* Initialize beep stuff */
218int __init snd_pmac_attach_beep(struct snd_pmac *chip) 218int __devinit snd_pmac_attach_beep(struct snd_pmac *chip)
219{ 219{
220 struct pmac_beep *beep; 220 struct pmac_beep *beep;
221 struct input_dev *input_dev; 221 struct input_dev *input_dev;
diff --git a/sound/ppc/burgundy.c b/sound/ppc/burgundy.c
index 45a76297c38d..16ed240e423c 100644
--- a/sound/ppc/burgundy.c
+++ b/sound/ppc/burgundy.c
@@ -46,12 +46,12 @@ snd_pmac_burgundy_extend_wait(struct snd_pmac *chip)
46 timeout = 50; 46 timeout = 50;
47 while (!(in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) 47 while (!(in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--)
48 udelay(1); 48 udelay(1);
49 if (! timeout) 49 if (timeout < 0)
50 printk(KERN_DEBUG "burgundy_extend_wait: timeout #1\n"); 50 printk(KERN_DEBUG "burgundy_extend_wait: timeout #1\n");
51 timeout = 50; 51 timeout = 50;
52 while ((in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--) 52 while ((in_le32(&chip->awacs->codec_stat) & MASK_EXTEND) && timeout--)
53 udelay(1); 53 udelay(1);
54 if (! timeout) 54 if (timeout < 0)
55 printk(KERN_DEBUG "burgundy_extend_wait: timeout #2\n"); 55 printk(KERN_DEBUG "burgundy_extend_wait: timeout #2\n");
56} 56}
57 57
@@ -468,7 +468,7 @@ static int snd_pmac_burgundy_put_switch_b(struct snd_kcontrol *kcontrol,
468/* 468/*
469 * Burgundy mixers 469 * Burgundy mixers
470 */ 470 */
471static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __initdata = { 471static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __devinitdata = {
472 BURGUNDY_VOLUME_W("Master Playback Volume", 0, 472 BURGUNDY_VOLUME_W("Master Playback Volume", 0,
473 MASK_ADDR_BURGUNDY_MASTER_VOLUME, 8), 473 MASK_ADDR_BURGUNDY_MASTER_VOLUME, 8),
474 BURGUNDY_VOLUME_W("CD Capture Volume", 0, 474 BURGUNDY_VOLUME_W("CD Capture Volume", 0,
@@ -496,7 +496,7 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __initdata = {
496 */ BURGUNDY_SWITCH_B("PCM Capture Switch", 0, 496 */ BURGUNDY_SWITCH_B("PCM Capture Switch", 0,
497 MASK_ADDR_BURGUNDY_HOSTIFEH, 0x01, 0, 0) 497 MASK_ADDR_BURGUNDY_HOSTIFEH, 0x01, 0, 0)
498}; 498};
499static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __initdata = { 499static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __devinitdata = {
500 BURGUNDY_VOLUME_W("Line in Capture Volume", 0, 500 BURGUNDY_VOLUME_W("Line in Capture Volume", 0,
501 MASK_ADDR_BURGUNDY_VOLLINE, 16), 501 MASK_ADDR_BURGUNDY_VOLLINE, 16),
502 BURGUNDY_VOLUME_W("Mic Capture Volume", 0, 502 BURGUNDY_VOLUME_W("Mic Capture Volume", 0,
@@ -522,7 +522,7 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __initdata = {
522 BURGUNDY_SWITCH_B("Mic Boost Capture Switch", 0, 522 BURGUNDY_SWITCH_B("Mic Boost Capture Switch", 0,
523 MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) 523 MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1)
524}; 524};
525static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __initdata = { 525static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __devinitdata = {
526 BURGUNDY_VOLUME_W("Line in Capture Volume", 0, 526 BURGUNDY_VOLUME_W("Line in Capture Volume", 0,
527 MASK_ADDR_BURGUNDY_VOLMIC, 16), 527 MASK_ADDR_BURGUNDY_VOLMIC, 16),
528 BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0, 528 BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0,
@@ -538,33 +538,33 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __initdata = {
538/* BURGUNDY_SWITCH_B("Line in Boost Capture Switch", 0, 538/* BURGUNDY_SWITCH_B("Line in Boost Capture Switch", 0,
539 * MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) */ 539 * MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) */
540}; 540};
541static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac __initdata = 541static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac __devinitdata =
542BURGUNDY_SWITCH_B("Master Playback Switch", 0, 542BURGUNDY_SWITCH_B("Master Playback Switch", 0,
543 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 543 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
544 BURGUNDY_OUTPUT_LEFT | BURGUNDY_LINEOUT_LEFT | BURGUNDY_HP_LEFT, 544 BURGUNDY_OUTPUT_LEFT | BURGUNDY_LINEOUT_LEFT | BURGUNDY_HP_LEFT,
545 BURGUNDY_OUTPUT_RIGHT | BURGUNDY_LINEOUT_RIGHT | BURGUNDY_HP_RIGHT, 1); 545 BURGUNDY_OUTPUT_RIGHT | BURGUNDY_LINEOUT_RIGHT | BURGUNDY_HP_RIGHT, 1);
546static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac __initdata = 546static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac __devinitdata =
547BURGUNDY_SWITCH_B("Master Playback Switch", 0, 547BURGUNDY_SWITCH_B("Master Playback Switch", 0,
548 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 548 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
549 BURGUNDY_OUTPUT_INTERN 549 BURGUNDY_OUTPUT_INTERN
550 | BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); 550 | BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
551static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac __initdata = 551static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac __devinitdata =
552BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0, 552BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0,
553 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 553 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
554 BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); 554 BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
555static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac __initdata = 555static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac __devinitdata =
556BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0, 556BURGUNDY_SWITCH_B("PC Speaker Playback Switch", 0,
557 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 557 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
558 BURGUNDY_OUTPUT_INTERN, 0, 0); 558 BURGUNDY_OUTPUT_INTERN, 0, 0);
559static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac __initdata = 559static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac __devinitdata =
560BURGUNDY_SWITCH_B("Line out Playback Switch", 0, 560BURGUNDY_SWITCH_B("Line out Playback Switch", 0,
561 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 561 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
562 BURGUNDY_LINEOUT_LEFT, BURGUNDY_LINEOUT_RIGHT, 1); 562 BURGUNDY_LINEOUT_LEFT, BURGUNDY_LINEOUT_RIGHT, 1);
563static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac __initdata = 563static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac __devinitdata =
564BURGUNDY_SWITCH_B("Line out Playback Switch", 0, 564BURGUNDY_SWITCH_B("Line out Playback Switch", 0,
565 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 565 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
566 BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1); 566 BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
567static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac __initdata = 567static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac __devinitdata =
568BURGUNDY_SWITCH_B("Headphone Playback Switch", 0, 568BURGUNDY_SWITCH_B("Headphone Playback Switch", 0,
569 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES, 569 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
570 BURGUNDY_HP_LEFT, BURGUNDY_HP_RIGHT, 1); 570 BURGUNDY_HP_LEFT, BURGUNDY_HP_RIGHT, 1);
@@ -618,7 +618,7 @@ static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_noti
618/* 618/*
619 * initialize burgundy 619 * initialize burgundy
620 */ 620 */
621int __init snd_pmac_burgundy_init(struct snd_pmac *chip) 621int __devinit snd_pmac_burgundy_init(struct snd_pmac *chip)
622{ 622{
623 int imac = machine_is_compatible("iMac"); 623 int imac = machine_is_compatible("iMac");
624 int i, err; 624 int i, err;
diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c
index f8d478c2da62..24200b7bdace 100644
--- a/sound/ppc/daca.c
+++ b/sound/ppc/daca.c
@@ -244,7 +244,7 @@ static void daca_cleanup(struct snd_pmac *chip)
244} 244}
245 245
246/* exported */ 246/* exported */
247int __init snd_pmac_daca_init(struct snd_pmac *chip) 247int __devinit snd_pmac_daca_init(struct snd_pmac *chip)
248{ 248{
249 int i, err; 249 int i, err;
250 struct pmac_daca *mix; 250 struct pmac_daca *mix;
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index a5afb2682e7f..835fa19ed461 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -33,10 +33,6 @@
33static struct pmac_keywest *keywest_ctx; 33static struct pmac_keywest *keywest_ctx;
34 34
35 35
36#ifndef i2c_device_name
37#define i2c_device_name(x) ((x)->name)
38#endif
39
40static int keywest_probe(struct i2c_client *client, 36static int keywest_probe(struct i2c_client *client,
41 const struct i2c_device_id *id) 37 const struct i2c_device_id *id)
42{ 38{
@@ -56,7 +52,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
56 if (! keywest_ctx) 52 if (! keywest_ctx)
57 return -EINVAL; 53 return -EINVAL;
58 54
59 if (strncmp(i2c_device_name(adapter), "mac-io", 6)) 55 if (strncmp(adapter->name, "mac-io", 6))
60 return 0; /* ignored */ 56 return 0; /* ignored */
61 57
62 memset(&info, 0, sizeof(struct i2c_board_info)); 58 memset(&info, 0, sizeof(struct i2c_board_info));
@@ -109,7 +105,7 @@ void snd_pmac_keywest_cleanup(struct pmac_keywest *i2c)
109 } 105 }
110} 106}
111 107
112int __init snd_pmac_tumbler_post_init(void) 108int __devinit snd_pmac_tumbler_post_init(void)
113{ 109{
114 int err; 110 int err;
115 111
@@ -124,7 +120,7 @@ int __init snd_pmac_tumbler_post_init(void)
124} 120}
125 121
126/* exported */ 122/* exported */
127int __init snd_pmac_keywest_init(struct pmac_keywest *i2c) 123int __devinit snd_pmac_keywest_init(struct pmac_keywest *i2c)
128{ 124{
129 int err; 125 int err;
130 126
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 9b4e9c316695..7bc492ee77ec 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -702,7 +702,7 @@ static struct snd_pcm_ops snd_pmac_capture_ops = {
702 .pointer = snd_pmac_capture_pointer, 702 .pointer = snd_pmac_capture_pointer,
703}; 703};
704 704
705int __init snd_pmac_pcm_new(struct snd_pmac *chip) 705int __devinit snd_pmac_pcm_new(struct snd_pmac *chip)
706{ 706{
707 struct snd_pcm *pcm; 707 struct snd_pcm *pcm;
708 int err; 708 int err;
@@ -908,7 +908,7 @@ static int snd_pmac_dev_free(struct snd_device *device)
908 * check the machine support byteswap (little-endian) 908 * check the machine support byteswap (little-endian)
909 */ 909 */
910 910
911static void __init detect_byte_swap(struct snd_pmac *chip) 911static void __devinit detect_byte_swap(struct snd_pmac *chip)
912{ 912{
913 struct device_node *mio; 913 struct device_node *mio;
914 914
@@ -934,7 +934,7 @@ static void __init detect_byte_swap(struct snd_pmac *chip)
934/* 934/*
935 * detect a sound chip 935 * detect a sound chip
936 */ 936 */
937static int __init snd_pmac_detect(struct snd_pmac *chip) 937static int __devinit snd_pmac_detect(struct snd_pmac *chip)
938{ 938{
939 struct device_node *sound; 939 struct device_node *sound;
940 struct device_node *dn; 940 struct device_node *dn;
@@ -1143,7 +1143,7 @@ static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol,
1143 return 0; 1143 return 0;
1144} 1144}
1145 1145
1146static struct snd_kcontrol_new auto_mute_controls[] __initdata = { 1146static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = {
1147 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1147 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1148 .name = "Auto Mute Switch", 1148 .name = "Auto Mute Switch",
1149 .info = snd_pmac_boolean_mono_info, 1149 .info = snd_pmac_boolean_mono_info,
@@ -1158,7 +1158,7 @@ static struct snd_kcontrol_new auto_mute_controls[] __initdata = {
1158 }, 1158 },
1159}; 1159};
1160 1160
1161int __init snd_pmac_add_automute(struct snd_pmac *chip) 1161int __devinit snd_pmac_add_automute(struct snd_pmac *chip)
1162{ 1162{
1163 int err; 1163 int err;
1164 chip->auto_mute = 1; 1164 chip->auto_mute = 1;
@@ -1175,7 +1175,7 @@ int __init snd_pmac_add_automute(struct snd_pmac *chip)
1175/* 1175/*
1176 * create and detect a pmac chip record 1176 * create and detect a pmac chip record
1177 */ 1177 */
1178int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) 1178int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
1179{ 1179{
1180 struct snd_pmac *chip; 1180 struct snd_pmac *chip;
1181 struct device_node *np; 1181 struct device_node *np;
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index f361c26506aa..53c81a547613 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -18,81 +18,31 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21#include <linux/dma-mapping.h>
22#include <linux/dmapool.h>
21#include <linux/init.h> 23#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/io.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/slab.h>
27
28#include <sound/asound.h>
29#include <sound/control.h>
25#include <sound/core.h> 30#include <sound/core.h>
26#include <sound/initval.h> 31#include <sound/initval.h>
27#include <sound/pcm.h>
28#include <sound/asound.h>
29#include <sound/memalloc.h> 32#include <sound/memalloc.h>
33#include <sound/pcm.h>
30#include <sound/pcm_params.h> 34#include <sound/pcm_params.h>
31#include <sound/control.h> 35
32#include <linux/dmapool.h>
33#include <linux/dma-mapping.h>
34#include <asm/firmware.h>
35#include <asm/dma.h> 36#include <asm/dma.h>
37#include <asm/firmware.h>
36#include <asm/lv1call.h> 38#include <asm/lv1call.h>
37#include <asm/ps3.h> 39#include <asm/ps3.h>
38#include <asm/ps3av.h> 40#include <asm/ps3av.h>
39 41
40#include "snd_ps3_reg.h"
41#include "snd_ps3.h" 42#include "snd_ps3.h"
42 43#include "snd_ps3_reg.h"
43MODULE_LICENSE("GPL v2");
44MODULE_DESCRIPTION("PS3 sound driver");
45MODULE_AUTHOR("Sony Computer Entertainment Inc.");
46
47/* module entries */
48static int __init snd_ps3_init(void);
49static void __exit snd_ps3_exit(void);
50
51/* ALSA snd driver ops */
52static int snd_ps3_pcm_open(struct snd_pcm_substream *substream);
53static int snd_ps3_pcm_close(struct snd_pcm_substream *substream);
54static int snd_ps3_pcm_prepare(struct snd_pcm_substream *substream);
55static int snd_ps3_pcm_trigger(struct snd_pcm_substream *substream,
56 int cmd);
57static snd_pcm_uframes_t snd_ps3_pcm_pointer(struct snd_pcm_substream
58 *substream);
59static int snd_ps3_pcm_hw_params(struct snd_pcm_substream *substream,
60 struct snd_pcm_hw_params *hw_params);
61static int snd_ps3_pcm_hw_free(struct snd_pcm_substream *substream);
62
63
64/* ps3_system_bus_driver entries */
65static int __init snd_ps3_driver_probe(struct ps3_system_bus_device *dev);
66static int snd_ps3_driver_remove(struct ps3_system_bus_device *dev);
67
68/* address setup */
69static int snd_ps3_map_mmio(void);
70static void snd_ps3_unmap_mmio(void);
71static int snd_ps3_allocate_irq(void);
72static void snd_ps3_free_irq(void);
73static void snd_ps3_audio_set_base_addr(uint64_t ioaddr_start);
74
75/* interrupt handler */
76static irqreturn_t snd_ps3_interrupt(int irq, void *dev_id);
77
78
79/* set sampling rate/format */
80static int snd_ps3_set_avsetting(struct snd_pcm_substream *substream);
81/* take effect parameter change */
82static int snd_ps3_change_avsetting(struct snd_ps3_card_info *card);
83/* initialize avsetting and take it effect */
84static int snd_ps3_init_avsetting(struct snd_ps3_card_info *card);
85/* setup dma */
86static int snd_ps3_program_dma(struct snd_ps3_card_info *card,
87 enum snd_ps3_dma_filltype filltype);
88static void snd_ps3_wait_for_dma_stop(struct snd_ps3_card_info *card);
89
90static dma_addr_t v_to_bus(struct snd_ps3_card_info *, void *vaddr, int ch);
91 44
92 45
93module_init(snd_ps3_init);
94module_exit(snd_ps3_exit);
95
96/* 46/*
97 * global 47 * global
98 */ 48 */
@@ -165,25 +115,13 @@ static const struct snd_pcm_hardware snd_ps3_pcm_hw = {
165 .fifo_size = PS3_AUDIO_FIFO_SIZE 115 .fifo_size = PS3_AUDIO_FIFO_SIZE
166}; 116};
167 117
168static struct snd_pcm_ops snd_ps3_pcm_spdif_ops =
169{
170 .open = snd_ps3_pcm_open,
171 .close = snd_ps3_pcm_close,
172 .prepare = snd_ps3_pcm_prepare,
173 .ioctl = snd_pcm_lib_ioctl,
174 .trigger = snd_ps3_pcm_trigger,
175 .pointer = snd_ps3_pcm_pointer,
176 .hw_params = snd_ps3_pcm_hw_params,
177 .hw_free = snd_ps3_pcm_hw_free
178};
179
180static int snd_ps3_verify_dma_stop(struct snd_ps3_card_info *card, 118static int snd_ps3_verify_dma_stop(struct snd_ps3_card_info *card,
181 int count, int force_stop) 119 int count, int force_stop)
182{ 120{
183 int dma_ch, done, retries, stop_forced = 0; 121 int dma_ch, done, retries, stop_forced = 0;
184 uint32_t status; 122 uint32_t status;
185 123
186 for (dma_ch = 0; dma_ch < 8; dma_ch ++) { 124 for (dma_ch = 0; dma_ch < 8; dma_ch++) {
187 retries = count; 125 retries = count;
188 do { 126 do {
189 status = read_reg(PS3_AUDIO_KICK(dma_ch)) & 127 status = read_reg(PS3_AUDIO_KICK(dma_ch)) &
@@ -259,9 +197,7 @@ static void snd_ps3_kick_dma(struct snd_ps3_card_info *card)
259/* 197/*
260 * convert virtual addr to ioif bus addr. 198 * convert virtual addr to ioif bus addr.
261 */ 199 */
262static dma_addr_t v_to_bus(struct snd_ps3_card_info *card, 200static dma_addr_t v_to_bus(struct snd_ps3_card_info *card, void *paddr, int ch)
263 void * paddr,
264 int ch)
265{ 201{
266 return card->dma_start_bus_addr[ch] + 202 return card->dma_start_bus_addr[ch] +
267 (paddr - card->dma_start_vaddr[ch]); 203 (paddr - card->dma_start_vaddr[ch]);
@@ -321,7 +257,7 @@ static int snd_ps3_program_dma(struct snd_ps3_card_info *card,
321 spin_lock_irqsave(&card->dma_lock, irqsave); 257 spin_lock_irqsave(&card->dma_lock, irqsave);
322 for (ch = 0; ch < 2; ch++) { 258 for (ch = 0; ch < 2; ch++) {
323 start_vaddr = card->dma_next_transfer_vaddr[0]; 259 start_vaddr = card->dma_next_transfer_vaddr[0];
324 for (stage = 0; stage < fill_stages; stage ++) { 260 for (stage = 0; stage < fill_stages; stage++) {
325 dma_ch = stage * 2 + ch; 261 dma_ch = stage * 2 + ch;
326 if (silent) 262 if (silent)
327 dma_addr = card->null_buffer_start_dma_addr; 263 dma_addr = card->null_buffer_start_dma_addr;
@@ -372,6 +308,71 @@ static int snd_ps3_program_dma(struct snd_ps3_card_info *card,
372} 308}
373 309
374/* 310/*
311 * Interrupt handler
312 */
313static irqreturn_t snd_ps3_interrupt(int irq, void *dev_id)
314{
315
316 uint32_t port_intr;
317 int underflow_occured = 0;
318 struct snd_ps3_card_info *card = dev_id;
319
320 if (!card->running) {
321 update_reg(PS3_AUDIO_AX_IS, 0);
322 update_reg(PS3_AUDIO_INTR_0, 0);
323 return IRQ_HANDLED;
324 }
325
326 port_intr = read_reg(PS3_AUDIO_AX_IS);
327 /*
328 *serial buffer empty detected (every 4 times),
329 *program next dma and kick it
330 */
331 if (port_intr & PS3_AUDIO_AX_IE_ASOBEIE(0)) {
332 write_reg(PS3_AUDIO_AX_IS, PS3_AUDIO_AX_IE_ASOBEIE(0));
333 if (port_intr & PS3_AUDIO_AX_IE_ASOBUIE(0)) {
334 write_reg(PS3_AUDIO_AX_IS, port_intr);
335 underflow_occured = 1;
336 }
337 if (card->silent) {
338 /* we are still in silent time */
339 snd_ps3_program_dma(card,
340 (underflow_occured) ?
341 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL :
342 SND_PS3_DMA_FILLTYPE_SILENT_RUNNING);
343 snd_ps3_kick_dma(card);
344 card->silent--;
345 } else {
346 snd_ps3_program_dma(card,
347 (underflow_occured) ?
348 SND_PS3_DMA_FILLTYPE_FIRSTFILL :
349 SND_PS3_DMA_FILLTYPE_RUNNING);
350 snd_ps3_kick_dma(card);
351 snd_pcm_period_elapsed(card->substream);
352 }
353 } else if (port_intr & PS3_AUDIO_AX_IE_ASOBUIE(0)) {
354 write_reg(PS3_AUDIO_AX_IS, PS3_AUDIO_AX_IE_ASOBUIE(0));
355 /*
356 * serial out underflow, but buffer empty not detected.
357 * in this case, fill fifo with 0 to recover. After
358 * filling dummy data, serial automatically start to
359 * consume them and then will generate normal buffer
360 * empty interrupts.
361 * If both buffer underflow and buffer empty are occured,
362 * it is better to do nomal data transfer than empty one
363 */
364 snd_ps3_program_dma(card,
365 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
366 snd_ps3_kick_dma(card);
367 snd_ps3_program_dma(card,
368 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
369 snd_ps3_kick_dma(card);
370 }
371 /* clear interrupt cause */
372 return IRQ_HANDLED;
373};
374
375/*
375 * audio mute on/off 376 * audio mute on/off
376 * mute_on : 0 output enabled 377 * mute_on : 0 output enabled
377 * 1 mute 378 * 1 mute
@@ -382,6 +383,142 @@ static int snd_ps3_mute(int mute_on)
382} 383}
383 384
384/* 385/*
386 * av setting
387 * NOTE: calling this function may generate audio interrupt.
388 */
389static int snd_ps3_change_avsetting(struct snd_ps3_card_info *card)
390{
391 int ret, retries, i;
392 pr_debug("%s: start\n", __func__);
393
394 ret = ps3av_set_audio_mode(card->avs.avs_audio_ch,
395 card->avs.avs_audio_rate,
396 card->avs.avs_audio_width,
397 card->avs.avs_audio_format,
398 card->avs.avs_audio_source);
399 /*
400 * Reset the following unwanted settings:
401 */
402
403 /* disable all 3wire buffers */
404 update_mask_reg(PS3_AUDIO_AO_3WMCTRL,
405 ~(PS3_AUDIO_AO_3WMCTRL_ASOEN(0) |
406 PS3_AUDIO_AO_3WMCTRL_ASOEN(1) |
407 PS3_AUDIO_AO_3WMCTRL_ASOEN(2) |
408 PS3_AUDIO_AO_3WMCTRL_ASOEN(3)),
409 0);
410 wmb(); /* ensure the hardware sees the change */
411 /* wait for actually stopped */
412 retries = 1000;
413 while ((read_reg(PS3_AUDIO_AO_3WMCTRL) &
414 (PS3_AUDIO_AO_3WMCTRL_ASORUN(0) |
415 PS3_AUDIO_AO_3WMCTRL_ASORUN(1) |
416 PS3_AUDIO_AO_3WMCTRL_ASORUN(2) |
417 PS3_AUDIO_AO_3WMCTRL_ASORUN(3))) &&
418 --retries) {
419 udelay(1);
420 }
421
422 /* reset buffer pointer */
423 for (i = 0; i < 4; i++) {
424 update_reg(PS3_AUDIO_AO_3WCTRL(i),
425 PS3_AUDIO_AO_3WCTRL_ASOBRST_RESET);
426 udelay(10);
427 }
428 wmb(); /* ensure the hardware actually start resetting */
429
430 /* enable 3wire#0 buffer */
431 update_reg(PS3_AUDIO_AO_3WMCTRL, PS3_AUDIO_AO_3WMCTRL_ASOEN(0));
432
433
434 /* In 24bit mode,ALSA inserts a zero byte at first byte of per sample */
435 update_mask_reg(PS3_AUDIO_AO_3WCTRL(0),
436 ~PS3_AUDIO_AO_3WCTRL_ASODF,
437 PS3_AUDIO_AO_3WCTRL_ASODF_LSB);
438 update_mask_reg(PS3_AUDIO_AO_SPDCTRL(0),
439 ~PS3_AUDIO_AO_SPDCTRL_SPODF,
440 PS3_AUDIO_AO_SPDCTRL_SPODF_LSB);
441 /* ensure all the setting above is written back to register */
442 wmb();
443 /* avsetting driver altered AX_IE, caller must reset it if you want */
444 pr_debug("%s: end\n", __func__);
445 return ret;
446}
447
448/*
449 * set sampling rate according to the substream
450 */
451static int snd_ps3_set_avsetting(struct snd_pcm_substream *substream)
452{
453 struct snd_ps3_card_info *card = snd_pcm_substream_chip(substream);
454 struct snd_ps3_avsetting_info avs;
455 int ret;
456
457 avs = card->avs;
458
459 pr_debug("%s: called freq=%d width=%d\n", __func__,
460 substream->runtime->rate,
461 snd_pcm_format_width(substream->runtime->format));
462
463 pr_debug("%s: before freq=%d width=%d\n", __func__,
464 card->avs.avs_audio_rate, card->avs.avs_audio_width);
465
466 /* sample rate */
467 switch (substream->runtime->rate) {
468 case 44100:
469 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_44K;
470 break;
471 case 48000:
472 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_48K;
473 break;
474 case 88200:
475 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_88K;
476 break;
477 case 96000:
478 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_96K;
479 break;
480 default:
481 pr_info("%s: invalid rate %d\n", __func__,
482 substream->runtime->rate);
483 return 1;
484 }
485
486 /* width */
487 switch (snd_pcm_format_width(substream->runtime->format)) {
488 case 16:
489 avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_16;
490 break;
491 case 24:
492 avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_24;
493 break;
494 default:
495 pr_info("%s: invalid width %d\n", __func__,
496 snd_pcm_format_width(substream->runtime->format));
497 return 1;
498 }
499
500 memcpy(avs.avs_cs_info, ps3av_mode_cs_info, 8);
501
502 if (memcmp(&card->avs, &avs, sizeof(avs))) {
503 pr_debug("%s: after freq=%d width=%d\n", __func__,
504 card->avs.avs_audio_rate, card->avs.avs_audio_width);
505
506 card->avs = avs;
507 snd_ps3_change_avsetting(card);
508 ret = 0;
509 } else
510 ret = 1;
511
512 /* check CS non-audio bit and mute accordingly */
513 if (avs.avs_cs_info[0] & 0x02)
514 ps3av_audio_mute_analog(1); /* mute if non-audio */
515 else
516 ps3av_audio_mute_analog(0);
517
518 return ret;
519}
520
521/*
385 * PCM operators 522 * PCM operators
386 */ 523 */
387static int snd_ps3_pcm_open(struct snd_pcm_substream *substream) 524static int snd_ps3_pcm_open(struct snd_pcm_substream *substream)
@@ -406,6 +543,13 @@ static int snd_ps3_pcm_open(struct snd_pcm_substream *substream)
406 return 0; 543 return 0;
407}; 544};
408 545
546static int snd_ps3_pcm_close(struct snd_pcm_substream *substream)
547{
548 /* mute on */
549 snd_ps3_mute(1);
550 return 0;
551};
552
409static int snd_ps3_pcm_hw_params(struct snd_pcm_substream *substream, 553static int snd_ps3_pcm_hw_params(struct snd_pcm_substream *substream,
410 struct snd_pcm_hw_params *hw_params) 554 struct snd_pcm_hw_params *hw_params)
411{ 555{
@@ -417,6 +561,13 @@ static int snd_ps3_pcm_hw_params(struct snd_pcm_substream *substream,
417 return 0; 561 return 0;
418}; 562};
419 563
564static int snd_ps3_pcm_hw_free(struct snd_pcm_substream *substream)
565{
566 int ret;
567 ret = snd_pcm_lib_free_pages(substream);
568 return ret;
569};
570
420static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream, 571static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
421 unsigned int delay_ms) 572 unsigned int delay_ms)
422{ 573{
@@ -556,202 +707,6 @@ static snd_pcm_uframes_t snd_ps3_pcm_pointer(
556 return ret; 707 return ret;
557}; 708};
558 709
559static int snd_ps3_pcm_hw_free(struct snd_pcm_substream *substream)
560{
561 int ret;
562 ret = snd_pcm_lib_free_pages(substream);
563 return ret;
564};
565
566static int snd_ps3_pcm_close(struct snd_pcm_substream *substream)
567{
568 /* mute on */
569 snd_ps3_mute(1);
570 return 0;
571};
572
573static void snd_ps3_audio_fixup(struct snd_ps3_card_info *card)
574{
575 /*
576 * avsetting driver seems to never change the followings
577 * so, init them here once
578 */
579
580 /* no dma interrupt needed */
581 write_reg(PS3_AUDIO_INTR_EN_0, 0);
582
583 /* use every 4 buffer empty interrupt */
584 update_mask_reg(PS3_AUDIO_AX_IC,
585 PS3_AUDIO_AX_IC_AASOIMD_MASK,
586 PS3_AUDIO_AX_IC_AASOIMD_EVERY4);
587
588 /* enable 3wire clocks */
589 update_mask_reg(PS3_AUDIO_AO_3WMCTRL,
590 ~(PS3_AUDIO_AO_3WMCTRL_ASOBCLKD_DISABLED |
591 PS3_AUDIO_AO_3WMCTRL_ASOLRCKD_DISABLED),
592 0);
593 update_reg(PS3_AUDIO_AO_3WMCTRL,
594 PS3_AUDIO_AO_3WMCTRL_ASOPLRCK_DEFAULT);
595}
596
597/*
598 * av setting
599 * NOTE: calling this function may generate audio interrupt.
600 */
601static int snd_ps3_change_avsetting(struct snd_ps3_card_info *card)
602{
603 int ret, retries, i;
604 pr_debug("%s: start\n", __func__);
605
606 ret = ps3av_set_audio_mode(card->avs.avs_audio_ch,
607 card->avs.avs_audio_rate,
608 card->avs.avs_audio_width,
609 card->avs.avs_audio_format,
610 card->avs.avs_audio_source);
611 /*
612 * Reset the following unwanted settings:
613 */
614
615 /* disable all 3wire buffers */
616 update_mask_reg(PS3_AUDIO_AO_3WMCTRL,
617 ~(PS3_AUDIO_AO_3WMCTRL_ASOEN(0) |
618 PS3_AUDIO_AO_3WMCTRL_ASOEN(1) |
619 PS3_AUDIO_AO_3WMCTRL_ASOEN(2) |
620 PS3_AUDIO_AO_3WMCTRL_ASOEN(3)),
621 0);
622 wmb(); /* ensure the hardware sees the change */
623 /* wait for actually stopped */
624 retries = 1000;
625 while ((read_reg(PS3_AUDIO_AO_3WMCTRL) &
626 (PS3_AUDIO_AO_3WMCTRL_ASORUN(0) |
627 PS3_AUDIO_AO_3WMCTRL_ASORUN(1) |
628 PS3_AUDIO_AO_3WMCTRL_ASORUN(2) |
629 PS3_AUDIO_AO_3WMCTRL_ASORUN(3))) &&
630 --retries) {
631 udelay(1);
632 }
633
634 /* reset buffer pointer */
635 for (i = 0; i < 4; i++) {
636 update_reg(PS3_AUDIO_AO_3WCTRL(i),
637 PS3_AUDIO_AO_3WCTRL_ASOBRST_RESET);
638 udelay(10);
639 }
640 wmb(); /* ensure the hardware actually start resetting */
641
642 /* enable 3wire#0 buffer */
643 update_reg(PS3_AUDIO_AO_3WMCTRL, PS3_AUDIO_AO_3WMCTRL_ASOEN(0));
644
645
646 /* In 24bit mode,ALSA inserts a zero byte at first byte of per sample */
647 update_mask_reg(PS3_AUDIO_AO_3WCTRL(0),
648 ~PS3_AUDIO_AO_3WCTRL_ASODF,
649 PS3_AUDIO_AO_3WCTRL_ASODF_LSB);
650 update_mask_reg(PS3_AUDIO_AO_SPDCTRL(0),
651 ~PS3_AUDIO_AO_SPDCTRL_SPODF,
652 PS3_AUDIO_AO_SPDCTRL_SPODF_LSB);
653 /* ensure all the setting above is written back to register */
654 wmb();
655 /* avsetting driver altered AX_IE, caller must reset it if you want */
656 pr_debug("%s: end\n", __func__);
657 return ret;
658}
659
660static int snd_ps3_init_avsetting(struct snd_ps3_card_info *card)
661{
662 int ret;
663 pr_debug("%s: start\n", __func__);
664 card->avs.avs_audio_ch = PS3AV_CMD_AUDIO_NUM_OF_CH_2;
665 card->avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_48K;
666 card->avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_16;
667 card->avs.avs_audio_format = PS3AV_CMD_AUDIO_FORMAT_PCM;
668 card->avs.avs_audio_source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
669 memcpy(card->avs.avs_cs_info, ps3av_mode_cs_info, 8);
670
671 ret = snd_ps3_change_avsetting(card);
672
673 snd_ps3_audio_fixup(card);
674
675 /* to start to generate SPDIF signal, fill data */
676 snd_ps3_program_dma(card, SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
677 snd_ps3_kick_dma(card);
678 pr_debug("%s: end\n", __func__);
679 return ret;
680}
681
682/*
683 * set sampling rate according to the substream
684 */
685static int snd_ps3_set_avsetting(struct snd_pcm_substream *substream)
686{
687 struct snd_ps3_card_info *card = snd_pcm_substream_chip(substream);
688 struct snd_ps3_avsetting_info avs;
689 int ret;
690
691 avs = card->avs;
692
693 pr_debug("%s: called freq=%d width=%d\n", __func__,
694 substream->runtime->rate,
695 snd_pcm_format_width(substream->runtime->format));
696
697 pr_debug("%s: before freq=%d width=%d\n", __func__,
698 card->avs.avs_audio_rate, card->avs.avs_audio_width);
699
700 /* sample rate */
701 switch (substream->runtime->rate) {
702 case 44100:
703 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_44K;
704 break;
705 case 48000:
706 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_48K;
707 break;
708 case 88200:
709 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_88K;
710 break;
711 case 96000:
712 avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_96K;
713 break;
714 default:
715 pr_info("%s: invalid rate %d\n", __func__,
716 substream->runtime->rate);
717 return 1;
718 }
719
720 /* width */
721 switch (snd_pcm_format_width(substream->runtime->format)) {
722 case 16:
723 avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_16;
724 break;
725 case 24:
726 avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_24;
727 break;
728 default:
729 pr_info("%s: invalid width %d\n", __func__,
730 snd_pcm_format_width(substream->runtime->format));
731 return 1;
732 }
733
734 memcpy(avs.avs_cs_info, ps3av_mode_cs_info, 8);
735
736 if (memcmp(&card->avs, &avs, sizeof(avs))) {
737 pr_debug("%s: after freq=%d width=%d\n", __func__,
738 card->avs.avs_audio_rate, card->avs.avs_audio_width);
739
740 card->avs = avs;
741 snd_ps3_change_avsetting(card);
742 ret = 0;
743 } else
744 ret = 1;
745
746 /* check CS non-audio bit and mute accordingly */
747 if (avs.avs_cs_info[0] & 0x02)
748 ps3av_audio_mute_analog(1); /* mute if non-audio */
749 else
750 ps3av_audio_mute_analog(0);
751
752 return ret;
753}
754
755/* 710/*
756 * SPDIF status bits controls 711 * SPDIF status bits controls
757 */ 712 */
@@ -798,28 +753,39 @@ static struct snd_kcontrol_new spdif_ctls[] = {
798 { 753 {
799 .access = SNDRV_CTL_ELEM_ACCESS_READ, 754 .access = SNDRV_CTL_ELEM_ACCESS_READ,
800 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 755 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
801 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 756 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
802 .info = snd_ps3_spdif_mask_info, 757 .info = snd_ps3_spdif_mask_info,
803 .get = snd_ps3_spdif_cmask_get, 758 .get = snd_ps3_spdif_cmask_get,
804 }, 759 },
805 { 760 {
806 .access = SNDRV_CTL_ELEM_ACCESS_READ, 761 .access = SNDRV_CTL_ELEM_ACCESS_READ,
807 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 762 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
808 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 763 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK),
809 .info = snd_ps3_spdif_mask_info, 764 .info = snd_ps3_spdif_mask_info,
810 .get = snd_ps3_spdif_pmask_get, 765 .get = snd_ps3_spdif_pmask_get,
811 }, 766 },
812 { 767 {
813 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 768 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
814 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), 769 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
815 .info = snd_ps3_spdif_mask_info, 770 .info = snd_ps3_spdif_mask_info,
816 .get = snd_ps3_spdif_default_get, 771 .get = snd_ps3_spdif_default_get,
817 .put = snd_ps3_spdif_default_put, 772 .put = snd_ps3_spdif_default_put,
818 }, 773 },
819}; 774};
820 775
776static struct snd_pcm_ops snd_ps3_pcm_spdif_ops = {
777 .open = snd_ps3_pcm_open,
778 .close = snd_ps3_pcm_close,
779 .ioctl = snd_pcm_lib_ioctl,
780 .hw_params = snd_ps3_pcm_hw_params,
781 .hw_free = snd_ps3_pcm_hw_free,
782 .prepare = snd_ps3_pcm_prepare,
783 .trigger = snd_ps3_pcm_trigger,
784 .pointer = snd_ps3_pcm_pointer,
785};
786
821 787
822static int snd_ps3_map_mmio(void) 788static int __devinit snd_ps3_map_mmio(void)
823{ 789{
824 the_card.mapped_mmio_vaddr = 790 the_card.mapped_mmio_vaddr =
825 ioremap(the_card.ps3_dev->m_region->bus_addr, 791 ioremap(the_card.ps3_dev->m_region->bus_addr,
@@ -841,7 +807,7 @@ static void snd_ps3_unmap_mmio(void)
841 the_card.mapped_mmio_vaddr = NULL; 807 the_card.mapped_mmio_vaddr = NULL;
842} 808}
843 809
844static int snd_ps3_allocate_irq(void) 810static int __devinit snd_ps3_allocate_irq(void)
845{ 811{
846 int ret; 812 int ret;
847 u64 lpar_addr, lpar_size; 813 u64 lpar_addr, lpar_size;
@@ -899,7 +865,7 @@ static void snd_ps3_free_irq(void)
899 ps3_irq_plug_destroy(the_card.irq_no); 865 ps3_irq_plug_destroy(the_card.irq_no);
900} 866}
901 867
902static void snd_ps3_audio_set_base_addr(uint64_t ioaddr_start) 868static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
903{ 869{
904 uint64_t val; 870 uint64_t val;
905 int ret; 871 int ret;
@@ -915,7 +881,53 @@ static void snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
915 ret); 881 ret);
916} 882}
917 883
918static int __init snd_ps3_driver_probe(struct ps3_system_bus_device *dev) 884static void __devinit snd_ps3_audio_fixup(struct snd_ps3_card_info *card)
885{
886 /*
887 * avsetting driver seems to never change the followings
888 * so, init them here once
889 */
890
891 /* no dma interrupt needed */
892 write_reg(PS3_AUDIO_INTR_EN_0, 0);
893
894 /* use every 4 buffer empty interrupt */
895 update_mask_reg(PS3_AUDIO_AX_IC,
896 PS3_AUDIO_AX_IC_AASOIMD_MASK,
897 PS3_AUDIO_AX_IC_AASOIMD_EVERY4);
898
899 /* enable 3wire clocks */
900 update_mask_reg(PS3_AUDIO_AO_3WMCTRL,
901 ~(PS3_AUDIO_AO_3WMCTRL_ASOBCLKD_DISABLED |
902 PS3_AUDIO_AO_3WMCTRL_ASOLRCKD_DISABLED),
903 0);
904 update_reg(PS3_AUDIO_AO_3WMCTRL,
905 PS3_AUDIO_AO_3WMCTRL_ASOPLRCK_DEFAULT);
906}
907
908static int __devinit snd_ps3_init_avsetting(struct snd_ps3_card_info *card)
909{
910 int ret;
911 pr_debug("%s: start\n", __func__);
912 card->avs.avs_audio_ch = PS3AV_CMD_AUDIO_NUM_OF_CH_2;
913 card->avs.avs_audio_rate = PS3AV_CMD_AUDIO_FS_48K;
914 card->avs.avs_audio_width = PS3AV_CMD_AUDIO_WORD_BITS_16;
915 card->avs.avs_audio_format = PS3AV_CMD_AUDIO_FORMAT_PCM;
916 card->avs.avs_audio_source = PS3AV_CMD_AUDIO_SOURCE_SERIAL;
917 memcpy(card->avs.avs_cs_info, ps3av_mode_cs_info, 8);
918
919 ret = snd_ps3_change_avsetting(card);
920
921 snd_ps3_audio_fixup(card);
922
923 /* to start to generate SPDIF signal, fill data */
924 snd_ps3_program_dma(card, SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
925 snd_ps3_kick_dma(card);
926 pr_debug("%s: end\n", __func__);
927 return ret;
928}
929
930static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
919{ 931{
920 int i, ret; 932 int i, ret;
921 u64 lpar_addr, lpar_size; 933 u64 lpar_addr, lpar_size;
@@ -1020,11 +1032,12 @@ static int __init snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
1020 * its size should be lager than PS3_AUDIO_FIFO_STAGE_SIZE * 2 1032 * its size should be lager than PS3_AUDIO_FIFO_STAGE_SIZE * 2
1021 * PAGE_SIZE is enogh 1033 * PAGE_SIZE is enogh
1022 */ 1034 */
1023 if (!(the_card.null_buffer_start_vaddr = 1035 the_card.null_buffer_start_vaddr =
1024 dma_alloc_coherent(&the_card.ps3_dev->core, 1036 dma_alloc_coherent(&the_card.ps3_dev->core,
1025 PAGE_SIZE, 1037 PAGE_SIZE,
1026 &the_card.null_buffer_start_dma_addr, 1038 &the_card.null_buffer_start_dma_addr,
1027 GFP_KERNEL))) { 1039 GFP_KERNEL);
1040 if (!the_card.null_buffer_start_vaddr) {
1028 pr_info("%s: nullbuffer alloc failed\n", __func__); 1041 pr_info("%s: nullbuffer alloc failed\n", __func__);
1029 goto clean_preallocate; 1042 goto clean_preallocate;
1030 } 1043 }
@@ -1115,71 +1128,6 @@ static struct ps3_system_bus_driver snd_ps3_bus_driver_info = {
1115 1128
1116 1129
1117/* 1130/*
1118 * Interrupt handler
1119 */
1120static irqreturn_t snd_ps3_interrupt(int irq, void *dev_id)
1121{
1122
1123 uint32_t port_intr;
1124 int underflow_occured = 0;
1125 struct snd_ps3_card_info *card = dev_id;
1126
1127 if (!card->running) {
1128 update_reg(PS3_AUDIO_AX_IS, 0);
1129 update_reg(PS3_AUDIO_INTR_0, 0);
1130 return IRQ_HANDLED;
1131 }
1132
1133 port_intr = read_reg(PS3_AUDIO_AX_IS);
1134 /*
1135 *serial buffer empty detected (every 4 times),
1136 *program next dma and kick it
1137 */
1138 if (port_intr & PS3_AUDIO_AX_IE_ASOBEIE(0)) {
1139 write_reg(PS3_AUDIO_AX_IS, PS3_AUDIO_AX_IE_ASOBEIE(0));
1140 if (port_intr & PS3_AUDIO_AX_IE_ASOBUIE(0)) {
1141 write_reg(PS3_AUDIO_AX_IS, port_intr);
1142 underflow_occured = 1;
1143 }
1144 if (card->silent) {
1145 /* we are still in silent time */
1146 snd_ps3_program_dma(card,
1147 (underflow_occured) ?
1148 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL :
1149 SND_PS3_DMA_FILLTYPE_SILENT_RUNNING);
1150 snd_ps3_kick_dma(card);
1151 card->silent --;
1152 } else {
1153 snd_ps3_program_dma(card,
1154 (underflow_occured) ?
1155 SND_PS3_DMA_FILLTYPE_FIRSTFILL :
1156 SND_PS3_DMA_FILLTYPE_RUNNING);
1157 snd_ps3_kick_dma(card);
1158 snd_pcm_period_elapsed(card->substream);
1159 }
1160 } else if (port_intr & PS3_AUDIO_AX_IE_ASOBUIE(0)) {
1161 write_reg(PS3_AUDIO_AX_IS, PS3_AUDIO_AX_IE_ASOBUIE(0));
1162 /*
1163 * serial out underflow, but buffer empty not detected.
1164 * in this case, fill fifo with 0 to recover. After
1165 * filling dummy data, serial automatically start to
1166 * consume them and then will generate normal buffer
1167 * empty interrupts.
1168 * If both buffer underflow and buffer empty are occured,
1169 * it is better to do nomal data transfer than empty one
1170 */
1171 snd_ps3_program_dma(card,
1172 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
1173 snd_ps3_kick_dma(card);
1174 snd_ps3_program_dma(card,
1175 SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL);
1176 snd_ps3_kick_dma(card);
1177 }
1178 /* clear interrupt cause */
1179 return IRQ_HANDLED;
1180};
1181
1182/*
1183 * module/subsystem initialize/terminate 1131 * module/subsystem initialize/terminate
1184 */ 1132 */
1185static int __init snd_ps3_init(void) 1133static int __init snd_ps3_init(void)
@@ -1197,10 +1145,15 @@ static int __init snd_ps3_init(void)
1197 1145
1198 return ret; 1146 return ret;
1199} 1147}
1148module_init(snd_ps3_init);
1200 1149
1201static void __exit snd_ps3_exit(void) 1150static void __exit snd_ps3_exit(void)
1202{ 1151{
1203 ps3_system_bus_driver_unregister(&snd_ps3_bus_driver_info); 1152 ps3_system_bus_driver_unregister(&snd_ps3_bus_driver_info);
1204} 1153}
1154module_exit(snd_ps3_exit);
1205 1155
1156MODULE_LICENSE("GPL v2");
1157MODULE_DESCRIPTION("PS3 sound driver");
1158MODULE_AUTHOR("Sony Computer Entertainment Inc.");
1206MODULE_ALIAS(PS3_MODULE_ALIAS_SOUND); 1159MODULE_ALIAS(PS3_MODULE_ALIAS_SOUND);
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 40222fcc0878..08e584d1453a 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -838,7 +838,7 @@ static int snapper_put_capture_source(struct snd_kcontrol *kcontrol,
838 838
839/* 839/*
840 */ 840 */
841static struct snd_kcontrol_new tumbler_mixers[] __initdata = { 841static struct snd_kcontrol_new tumbler_mixers[] __devinitdata = {
842 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 842 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
843 .name = "Master Playback Volume", 843 .name = "Master Playback Volume",
844 .info = tumbler_info_master_volume, 844 .info = tumbler_info_master_volume,
@@ -862,7 +862,7 @@ static struct snd_kcontrol_new tumbler_mixers[] __initdata = {
862 }, 862 },
863}; 863};
864 864
865static struct snd_kcontrol_new snapper_mixers[] __initdata = { 865static struct snd_kcontrol_new snapper_mixers[] __devinitdata = {
866 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 866 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
867 .name = "Master Playback Volume", 867 .name = "Master Playback Volume",
868 .info = tumbler_info_master_volume, 868 .info = tumbler_info_master_volume,
@@ -895,7 +895,7 @@ static struct snd_kcontrol_new snapper_mixers[] __initdata = {
895 }, 895 },
896}; 896};
897 897
898static struct snd_kcontrol_new tumbler_hp_sw __initdata = { 898static struct snd_kcontrol_new tumbler_hp_sw __devinitdata = {
899 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 899 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
900 .name = "Headphone Playback Switch", 900 .name = "Headphone Playback Switch",
901 .info = snd_pmac_boolean_mono_info, 901 .info = snd_pmac_boolean_mono_info,
@@ -903,7 +903,7 @@ static struct snd_kcontrol_new tumbler_hp_sw __initdata = {
903 .put = tumbler_put_mute_switch, 903 .put = tumbler_put_mute_switch,
904 .private_value = TUMBLER_MUTE_HP, 904 .private_value = TUMBLER_MUTE_HP,
905}; 905};
906static struct snd_kcontrol_new tumbler_speaker_sw __initdata = { 906static struct snd_kcontrol_new tumbler_speaker_sw __devinitdata = {
907 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 907 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
908 .name = "PC Speaker Playback Switch", 908 .name = "PC Speaker Playback Switch",
909 .info = snd_pmac_boolean_mono_info, 909 .info = snd_pmac_boolean_mono_info,
@@ -911,7 +911,7 @@ static struct snd_kcontrol_new tumbler_speaker_sw __initdata = {
911 .put = tumbler_put_mute_switch, 911 .put = tumbler_put_mute_switch,
912 .private_value = TUMBLER_MUTE_AMP, 912 .private_value = TUMBLER_MUTE_AMP,
913}; 913};
914static struct snd_kcontrol_new tumbler_lineout_sw __initdata = { 914static struct snd_kcontrol_new tumbler_lineout_sw __devinitdata = {
915 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 915 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
916 .name = "Line Out Playback Switch", 916 .name = "Line Out Playback Switch",
917 .info = snd_pmac_boolean_mono_info, 917 .info = snd_pmac_boolean_mono_info,
@@ -919,7 +919,7 @@ static struct snd_kcontrol_new tumbler_lineout_sw __initdata = {
919 .put = tumbler_put_mute_switch, 919 .put = tumbler_put_mute_switch,
920 .private_value = TUMBLER_MUTE_LINE, 920 .private_value = TUMBLER_MUTE_LINE,
921}; 921};
922static struct snd_kcontrol_new tumbler_drc_sw __initdata = { 922static struct snd_kcontrol_new tumbler_drc_sw __devinitdata = {
923 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 923 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
924 .name = "DRC Switch", 924 .name = "DRC Switch",
925 .info = snd_pmac_boolean_mono_info, 925 .info = snd_pmac_boolean_mono_info,
@@ -1269,7 +1269,7 @@ static void tumbler_resume(struct snd_pmac *chip)
1269#endif 1269#endif
1270 1270
1271/* initialize tumbler */ 1271/* initialize tumbler */
1272static int __init tumbler_init(struct snd_pmac *chip) 1272static int __devinit tumbler_init(struct snd_pmac *chip)
1273{ 1273{
1274 int irq; 1274 int irq;
1275 struct pmac_tumbler *mix = chip->mixer_data; 1275 struct pmac_tumbler *mix = chip->mixer_data;
@@ -1339,7 +1339,7 @@ static void tumbler_cleanup(struct snd_pmac *chip)
1339} 1339}
1340 1340
1341/* exported */ 1341/* exported */
1342int __init snd_pmac_tumbler_init(struct snd_pmac *chip) 1342int __devinit snd_pmac_tumbler_init(struct snd_pmac *chip)
1343{ 1343{
1344 int i, err; 1344 int i, err;
1345 struct pmac_tumbler *mix; 1345 struct pmac_tumbler *mix;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 3d2bb6fc6dcc..d3e786a9a0a7 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -32,7 +32,9 @@ source "sound/soc/fsl/Kconfig"
32source "sound/soc/omap/Kconfig" 32source "sound/soc/omap/Kconfig"
33source "sound/soc/pxa/Kconfig" 33source "sound/soc/pxa/Kconfig"
34source "sound/soc/s3c24xx/Kconfig" 34source "sound/soc/s3c24xx/Kconfig"
35source "sound/soc/s6000/Kconfig"
35source "sound/soc/sh/Kconfig" 36source "sound/soc/sh/Kconfig"
37source "sound/soc/txx9/Kconfig"
36 38
37# Supported codecs 39# Supported codecs
38source "sound/soc/codecs/Kconfig" 40source "sound/soc/codecs/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 0237879fd412..6f1e28de23cf 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -10,4 +10,6 @@ obj-$(CONFIG_SND_SOC) += fsl/
10obj-$(CONFIG_SND_SOC) += omap/ 10obj-$(CONFIG_SND_SOC) += omap/
11obj-$(CONFIG_SND_SOC) += pxa/ 11obj-$(CONFIG_SND_SOC) += pxa/
12obj-$(CONFIG_SND_SOC) += s3c24xx/ 12obj-$(CONFIG_SND_SOC) += s3c24xx/
13obj-$(CONFIG_SND_SOC) += s6000/
13obj-$(CONFIG_SND_SOC) += sh/ 14obj-$(CONFIG_SND_SOC) += sh/
15obj-$(CONFIG_SND_SOC) += txx9/
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index a608d7009dbd..e720d5e6f04c 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -41,3 +41,11 @@ config SND_AT32_SOC_PLAYPAQ_SLAVE
41 and FRAME signals on the PlayPaq. Unless you want to play 41 and FRAME signals on the PlayPaq. Unless you want to play
42 with the AT32 as the SSC master, you probably want to say N here, 42 with the AT32 as the SSC master, you probably want to say N here,
43 as this will give you better sound quality. 43 as this will give you better sound quality.
44
45config SND_AT91_SOC_AFEB9260
46 tristate "SoC Audio support for AFEB9260 board"
47 depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
48 select SND_ATMEL_SOC_SSC
49 select SND_SOC_TLV320AIC23
50 help
51 Say Y here to support sound on AFEB9260 board.
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index f54a7cc68e66..e7ea56bd5f82 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -13,3 +13,4 @@ snd-soc-playpaq-objs := playpaq_wm8510.o
13 13
14obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o 14obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
15obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o 15obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
16obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
index 70657534e6b1..9eb610c2ba91 100644
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ b/sound/soc/atmel/playpaq_wm8510.c
@@ -117,7 +117,7 @@ static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
117 * Find actual rate, compare to requested rate 117 * Find actual rate, compare to requested rate
118 */ 118 */
119 actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1)); 119 actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
120 pr_debug("playpaq_wm8510: Request rate = %d, actual rate = %d\n", 120 pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
121 rate, actual_rate); 121 rate, actual_rate);
122 122
123 123
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
new file mode 100644
index 000000000000..23349de27313
--- /dev/null
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -0,0 +1,203 @@
1/*
2 * afeb9260.c -- SoC audio for AFEB9260
3 *
4 * Copyright (C) 2009 Sergey Lapin <slapin@ossfans.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/kernel.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27
28#include <linux/atmel-ssc.h>
29#include <sound/core.h>
30#include <sound/pcm.h>
31#include <sound/pcm_params.h>
32#include <sound/soc.h>
33#include <sound/soc-dapm.h>
34
35#include <asm/mach-types.h>
36#include <mach/hardware.h>
37#include <linux/gpio.h>
38
39#include "../codecs/tlv320aic23.h"
40#include "atmel-pcm.h"
41#include "atmel_ssc_dai.h"
42
43#define CODEC_CLOCK 12000000
44
45static int afeb9260_hw_params(struct snd_pcm_substream *substream,
46 struct snd_pcm_hw_params *params)
47{
48 struct snd_soc_pcm_runtime *rtd = substream->private_data;
49 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
50 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
51 int err;
52
53 /* Set codec DAI configuration */
54 err = snd_soc_dai_set_fmt(codec_dai,
55 SND_SOC_DAIFMT_I2S|
56 SND_SOC_DAIFMT_NB_IF |
57 SND_SOC_DAIFMT_CBM_CFM);
58 if (err < 0) {
59 printk(KERN_ERR "can't set codec DAI configuration\n");
60 return err;
61 }
62
63 /* Set cpu DAI configuration */
64 err = snd_soc_dai_set_fmt(cpu_dai,
65 SND_SOC_DAIFMT_I2S |
66 SND_SOC_DAIFMT_NB_IF |
67 SND_SOC_DAIFMT_CBM_CFM);
68 if (err < 0) {
69 printk(KERN_ERR "can't set cpu DAI configuration\n");
70 return err;
71 }
72
73 /* Set the codec system clock for DAC and ADC */
74 err =
75 snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_IN);
76
77 if (err < 0) {
78 printk(KERN_ERR "can't set codec system clock\n");
79 return err;
80 }
81
82 return err;
83}
84
85static struct snd_soc_ops afeb9260_ops = {
86 .hw_params = afeb9260_hw_params,
87};
88
89static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
90 SND_SOC_DAPM_HP("Headphone Jack", NULL),
91 SND_SOC_DAPM_LINE("Line In", NULL),
92 SND_SOC_DAPM_MIC("Mic Jack", NULL),
93};
94
95static const struct snd_soc_dapm_route audio_map[] = {
96 {"Headphone Jack", NULL, "LHPOUT"},
97 {"Headphone Jack", NULL, "RHPOUT"},
98
99 {"LLINEIN", NULL, "Line In"},
100 {"RLINEIN", NULL, "Line In"},
101
102 {"MICIN", NULL, "Mic Jack"},
103};
104
105static int afeb9260_tlv320aic23_init(struct snd_soc_codec *codec)
106{
107
108 /* Add afeb9260 specific widgets */
109 snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
110 ARRAY_SIZE(tlv320aic23_dapm_widgets));
111
112 /* Set up afeb9260 specific audio path audio_map */
113 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
114
115 snd_soc_dapm_enable_pin(codec, "Headphone Jack");
116 snd_soc_dapm_enable_pin(codec, "Line In");
117 snd_soc_dapm_enable_pin(codec, "Mic Jack");
118
119 snd_soc_dapm_sync(codec);
120
121 return 0;
122}
123
124/* Digital audio interface glue - connects codec <--> CPU */
125static struct snd_soc_dai_link afeb9260_dai = {
126 .name = "TLV320AIC23",
127 .stream_name = "AIC23",
128 .cpu_dai = &atmel_ssc_dai[0],
129 .codec_dai = &tlv320aic23_dai,
130 .init = afeb9260_tlv320aic23_init,
131 .ops = &afeb9260_ops,
132};
133
134/* Audio machine driver */
135static struct snd_soc_card snd_soc_machine_afeb9260 = {
136 .name = "AFEB9260",
137 .platform = &atmel_soc_platform,
138 .dai_link = &afeb9260_dai,
139 .num_links = 1,
140};
141
142/* Audio subsystem */
143static struct snd_soc_device afeb9260_snd_devdata = {
144 .card = &snd_soc_machine_afeb9260,
145 .codec_dev = &soc_codec_dev_tlv320aic23,
146};
147
148static struct platform_device *afeb9260_snd_device;
149
150static int __init afeb9260_soc_init(void)
151{
152 int err;
153 struct device *dev;
154 struct atmel_ssc_info *ssc_p = afeb9260_dai.cpu_dai->private_data;
155 struct ssc_device *ssc = NULL;
156
157 if (!(machine_is_afeb9260()))
158 return -ENODEV;
159
160 ssc = ssc_request(0);
161 if (IS_ERR(ssc)) {
162 printk(KERN_ERR "ASoC: Failed to request SSC 0\n");
163 err = PTR_ERR(ssc);
164 ssc = NULL;
165 goto err_ssc;
166 }
167 ssc_p->ssc = ssc;
168
169 afeb9260_snd_device = platform_device_alloc("soc-audio", -1);
170 if (!afeb9260_snd_device) {
171 printk(KERN_ERR "ASoC: Platform device allocation failed\n");
172 return -ENOMEM;
173 }
174
175 platform_set_drvdata(afeb9260_snd_device, &afeb9260_snd_devdata);
176 afeb9260_snd_devdata.dev = &afeb9260_snd_device->dev;
177 err = platform_device_add(afeb9260_snd_device);
178 if (err)
179 goto err1;
180
181 dev = &afeb9260_snd_device->dev;
182
183 return 0;
184err1:
185 platform_device_del(afeb9260_snd_device);
186 platform_device_put(afeb9260_snd_device);
187err_ssc:
188 return err;
189
190}
191
192static void __exit afeb9260_soc_exit(void)
193{
194 platform_device_unregister(afeb9260_snd_device);
195}
196
197module_init(afeb9260_soc_init);
198module_exit(afeb9260_soc_exit);
199
200MODULE_AUTHOR("Sergey Lapin <slapin@ossfans.org>");
201MODULE_DESCRIPTION("ALSA SoC for AFEB9260");
202MODULE_LICENSE("GPL");
203
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index 8a935f2d1767..b1ed423fabd5 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -31,6 +31,15 @@
31#include "bf5xx-sport.h" 31#include "bf5xx-sport.h"
32#include "bf5xx-ac97.h" 32#include "bf5xx-ac97.h"
33 33
34/* Anomaly notes:
35 * 05000250 - AD1980 is running in TDM mode and RFS/TFS are generated by SPORT
36 * controller. But, RFSDIV and TFSDIV are always set to 16*16-1,
37 * while the max AC97 data size is 13*16. The DIV is always larger
38 * than data size. AD73311 and ad2602 are not running in TDM mode.
39 * AD1836 and AD73322 depend on external RFS/TFS only. So, this
40 * anomaly does not affect blackfin sound drivers.
41*/
42
34static int *cmd_count; 43static int *cmd_count;
35static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM; 44static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM;
36 45
diff --git a/sound/soc/blackfin/bf5xx-sport.c b/sound/soc/blackfin/bf5xx-sport.c
index b7953c8cf838..469ce7fab20c 100644
--- a/sound/soc/blackfin/bf5xx-sport.c
+++ b/sound/soc/blackfin/bf5xx-sport.c
@@ -190,7 +190,7 @@ static inline int sport_hook_rx_dummy(struct sport_device *sport)
190 desc = get_dma_next_desc_ptr(sport->dma_rx_chan); 190 desc = get_dma_next_desc_ptr(sport->dma_rx_chan);
191 /* Copy the descriptor which will be damaged to backup */ 191 /* Copy the descriptor which will be damaged to backup */
192 temp_desc = *desc; 192 temp_desc = *desc;
193 desc->x_count = 0xa; 193 desc->x_count = sport->dummy_count / 2;
194 desc->y_count = 0; 194 desc->y_count = 0;
195 desc->next_desc_addr = sport->dummy_rx_desc; 195 desc->next_desc_addr = sport->dummy_rx_desc;
196 local_irq_restore(flags); 196 local_irq_restore(flags);
@@ -309,7 +309,7 @@ static inline int sport_hook_tx_dummy(struct sport_device *sport)
309 desc = get_dma_next_desc_ptr(sport->dma_tx_chan); 309 desc = get_dma_next_desc_ptr(sport->dma_tx_chan);
310 /* Store the descriptor which will be damaged */ 310 /* Store the descriptor which will be damaged */
311 temp_desc = *desc; 311 temp_desc = *desc;
312 desc->x_count = 0xa; 312 desc->x_count = sport->dummy_count / 2;
313 desc->y_count = 0; 313 desc->y_count = 0;
314 desc->next_desc_addr = sport->dummy_tx_desc; 314 desc->next_desc_addr = sport->dummy_tx_desc;
315 local_irq_restore(flags); 315 local_irq_restore(flags);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b6c7f7a01cb0..bbc97fd76648 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -18,7 +18,9 @@ config SND_SOC_ALL_CODECS
18 select SND_SOC_AK4535 if I2C 18 select SND_SOC_AK4535 if I2C
19 select SND_SOC_CS4270 if I2C 19 select SND_SOC_CS4270 if I2C
20 select SND_SOC_PCM3008 20 select SND_SOC_PCM3008
21 select SND_SOC_SPDIF
21 select SND_SOC_SSM2602 if I2C 22 select SND_SOC_SSM2602 if I2C
23 select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
22 select SND_SOC_TLV320AIC23 if I2C 24 select SND_SOC_TLV320AIC23 if I2C
23 select SND_SOC_TLV320AIC26 if SPI_MASTER 25 select SND_SOC_TLV320AIC26 if SPI_MASTER
24 select SND_SOC_TLV320AIC3X if I2C 26 select SND_SOC_TLV320AIC3X if I2C
@@ -35,8 +37,12 @@ config SND_SOC_ALL_CODECS
35 select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI 37 select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
36 select SND_SOC_WM8900 if I2C 38 select SND_SOC_WM8900 if I2C
37 select SND_SOC_WM8903 if I2C 39 select SND_SOC_WM8903 if I2C
40 select SND_SOC_WM8940 if I2C
41 select SND_SOC_WM8960 if I2C
38 select SND_SOC_WM8971 if I2C 42 select SND_SOC_WM8971 if I2C
43 select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
39 select SND_SOC_WM8990 if I2C 44 select SND_SOC_WM8990 if I2C
45 select SND_SOC_WM9081 if I2C
40 select SND_SOC_WM9705 if SND_SOC_AC97_BUS 46 select SND_SOC_WM9705 if SND_SOC_AC97_BUS
41 select SND_SOC_WM9712 if SND_SOC_AC97_BUS 47 select SND_SOC_WM9712 if SND_SOC_AC97_BUS
42 select SND_SOC_WM9713 if SND_SOC_AC97_BUS 48 select SND_SOC_WM9713 if SND_SOC_AC97_BUS
@@ -86,9 +92,15 @@ config SND_SOC_L3
86config SND_SOC_PCM3008 92config SND_SOC_PCM3008
87 tristate 93 tristate
88 94
95config SND_SOC_SPDIF
96 tristate
97
89config SND_SOC_SSM2602 98config SND_SOC_SSM2602
90 tristate 99 tristate
91 100
101config SND_SOC_STAC9766
102 tristate
103
92config SND_SOC_TLV320AIC23 104config SND_SOC_TLV320AIC23
93 tristate 105 tristate
94 106
@@ -138,12 +150,24 @@ config SND_SOC_WM8900
138config SND_SOC_WM8903 150config SND_SOC_WM8903
139 tristate 151 tristate
140 152
153config SND_SOC_WM8940
154 tristate
155
156config SND_SOC_WM8960
157 tristate
158
141config SND_SOC_WM8971 159config SND_SOC_WM8971
142 tristate 160 tristate
143 161
162config SND_SOC_WM8988
163 tristate
164
144config SND_SOC_WM8990 165config SND_SOC_WM8990
145 tristate 166 tristate
146 167
168config SND_SOC_WM9081
169 tristate
170
147config SND_SOC_WM9705 171config SND_SOC_WM9705
148 tristate 172 tristate
149 173
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f2653803ede8..8b7530546f4d 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -6,7 +6,9 @@ snd-soc-ak4535-objs := ak4535.o
6snd-soc-cs4270-objs := cs4270.o 6snd-soc-cs4270-objs := cs4270.o
7snd-soc-l3-objs := l3.o 7snd-soc-l3-objs := l3.o
8snd-soc-pcm3008-objs := pcm3008.o 8snd-soc-pcm3008-objs := pcm3008.o
9snd-soc-spdif-objs := spdif_transciever.o
9snd-soc-ssm2602-objs := ssm2602.o 10snd-soc-ssm2602-objs := ssm2602.o
11snd-soc-stac9766-objs := stac9766.o
10snd-soc-tlv320aic23-objs := tlv320aic23.o 12snd-soc-tlv320aic23-objs := tlv320aic23.o
11snd-soc-tlv320aic26-objs := tlv320aic26.o 13snd-soc-tlv320aic26-objs := tlv320aic26.o
12snd-soc-tlv320aic3x-objs := tlv320aic3x.o 14snd-soc-tlv320aic3x-objs := tlv320aic3x.o
@@ -23,8 +25,12 @@ snd-soc-wm8750-objs := wm8750.o
23snd-soc-wm8753-objs := wm8753.o 25snd-soc-wm8753-objs := wm8753.o
24snd-soc-wm8900-objs := wm8900.o 26snd-soc-wm8900-objs := wm8900.o
25snd-soc-wm8903-objs := wm8903.o 27snd-soc-wm8903-objs := wm8903.o
28snd-soc-wm8940-objs := wm8940.o
29snd-soc-wm8960-objs := wm8960.o
26snd-soc-wm8971-objs := wm8971.o 30snd-soc-wm8971-objs := wm8971.o
31snd-soc-wm8988-objs := wm8988.o
27snd-soc-wm8990-objs := wm8990.o 32snd-soc-wm8990-objs := wm8990.o
33snd-soc-wm9081-objs := wm9081.o
28snd-soc-wm9705-objs := wm9705.o 34snd-soc-wm9705-objs := wm9705.o
29snd-soc-wm9712-objs := wm9712.o 35snd-soc-wm9712-objs := wm9712.o
30snd-soc-wm9713-objs := wm9713.o 36snd-soc-wm9713-objs := wm9713.o
@@ -37,7 +43,9 @@ obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
37obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o 43obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
38obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o 44obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
39obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o 45obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
46obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
40obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o 47obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
48obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
41obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o 49obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
42obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o 50obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
43obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o 51obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
@@ -55,7 +63,11 @@ obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o
55obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o 63obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o
56obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o 64obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
57obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o 65obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
66obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o
67obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o
68obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
58obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o 69obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
70obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o
59obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o 71obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
60obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o 72obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
61obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o 73obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index b0d4af145b87..932299bb5d1e 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -53,13 +53,13 @@ struct snd_soc_dai ac97_dai = {
53 .channels_min = 1, 53 .channels_min = 1,
54 .channels_max = 2, 54 .channels_max = 2,
55 .rates = STD_AC97_RATES, 55 .rates = STD_AC97_RATES,
56 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 56 .formats = SND_SOC_STD_AC97_FMTS,},
57 .capture = { 57 .capture = {
58 .stream_name = "AC97 Capture", 58 .stream_name = "AC97 Capture",
59 .channels_min = 1, 59 .channels_min = 1,
60 .channels_max = 2, 60 .channels_max = 2,
61 .rates = STD_AC97_RATES, 61 .rates = STD_AC97_RATES,
62 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 62 .formats = SND_SOC_STD_AC97_FMTS,},
63 .ops = &ac97_dai_ops, 63 .ops = &ac97_dai_ops,
64}; 64};
65EXPORT_SYMBOL_GPL(ac97_dai); 65EXPORT_SYMBOL_GPL(ac97_dai);
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index ddb3b08ac23c..d7440a982d22 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -137,13 +137,13 @@ struct snd_soc_dai ad1980_dai = {
137 .channels_min = 2, 137 .channels_min = 2,
138 .channels_max = 6, 138 .channels_max = 6,
139 .rates = SNDRV_PCM_RATE_48000, 139 .rates = SNDRV_PCM_RATE_48000,
140 .formats = SNDRV_PCM_FMTBIT_S16_LE, }, 140 .formats = SND_SOC_STD_AC97_FMTS, },
141 .capture = { 141 .capture = {
142 .stream_name = "Capture", 142 .stream_name = "Capture",
143 .channels_min = 2, 143 .channels_min = 2,
144 .channels_max = 2, 144 .channels_max = 2,
145 .rates = SNDRV_PCM_RATE_48000, 145 .rates = SNDRV_PCM_RATE_48000,
146 .formats = SNDRV_PCM_FMTBIT_S16_LE, }, 146 .formats = SND_SOC_STD_AC97_FMTS, },
147}; 147};
148EXPORT_SYMBOL_GPL(ad1980_dai); 148EXPORT_SYMBOL_GPL(ad1980_dai);
149 149
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 7fa09a387622..a32b8226c8a4 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -18,7 +18,7 @@
18 * - The machine driver's 'startup' function must call 18 * - The machine driver's 'startup' function must call
19 * cs4270_set_dai_sysclk() with the value of MCLK. 19 * cs4270_set_dai_sysclk() with the value of MCLK.
20 * - Only I2S and left-justified modes are supported 20 * - Only I2S and left-justified modes are supported
21 * - Power management is not supported 21 * - Power management is supported
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
@@ -27,6 +27,7 @@
27#include <sound/soc.h> 27#include <sound/soc.h>
28#include <sound/initval.h> 28#include <sound/initval.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/delay.h>
30 31
31#include "cs4270.h" 32#include "cs4270.h"
32 33
@@ -56,6 +57,7 @@
56#define CS4270_FIRSTREG 0x01 57#define CS4270_FIRSTREG 0x01
57#define CS4270_LASTREG 0x08 58#define CS4270_LASTREG 0x08
58#define CS4270_NUMREGS (CS4270_LASTREG - CS4270_FIRSTREG + 1) 59#define CS4270_NUMREGS (CS4270_LASTREG - CS4270_FIRSTREG + 1)
60#define CS4270_I2C_INCR 0x80
59 61
60/* Bit masks for the CS4270 registers */ 62/* Bit masks for the CS4270 registers */
61#define CS4270_CHIPID_ID 0xF0 63#define CS4270_CHIPID_ID 0xF0
@@ -64,6 +66,8 @@
64#define CS4270_PWRCTL_PDN_ADC 0x20 66#define CS4270_PWRCTL_PDN_ADC 0x20
65#define CS4270_PWRCTL_PDN_DAC 0x02 67#define CS4270_PWRCTL_PDN_DAC 0x02
66#define CS4270_PWRCTL_PDN 0x01 68#define CS4270_PWRCTL_PDN 0x01
69#define CS4270_PWRCTL_PDN_ALL \
70 (CS4270_PWRCTL_PDN_ADC | CS4270_PWRCTL_PDN_DAC | CS4270_PWRCTL_PDN)
67#define CS4270_MODE_SPEED_MASK 0x30 71#define CS4270_MODE_SPEED_MASK 0x30
68#define CS4270_MODE_1X 0x00 72#define CS4270_MODE_1X 0x00
69#define CS4270_MODE_2X 0x10 73#define CS4270_MODE_2X 0x10
@@ -109,6 +113,7 @@ struct cs4270_private {
109 unsigned int mclk; /* Input frequency of the MCLK pin */ 113 unsigned int mclk; /* Input frequency of the MCLK pin */
110 unsigned int mode; /* The mode (I2S or left-justified) */ 114 unsigned int mode; /* The mode (I2S or left-justified) */
111 unsigned int slave_mode; 115 unsigned int slave_mode;
116 unsigned int manual_mute;
112}; 117};
113 118
114/** 119/**
@@ -295,7 +300,7 @@ static int cs4270_fill_cache(struct snd_soc_codec *codec)
295 s32 length; 300 s32 length;
296 301
297 length = i2c_smbus_read_i2c_block_data(i2c_client, 302 length = i2c_smbus_read_i2c_block_data(i2c_client,
298 CS4270_FIRSTREG | 0x80, CS4270_NUMREGS, cache); 303 CS4270_FIRSTREG | CS4270_I2C_INCR, CS4270_NUMREGS, cache);
299 304
300 if (length != CS4270_NUMREGS) { 305 if (length != CS4270_NUMREGS) {
301 dev_err(codec->dev, "i2c read failure, addr=0x%x\n", 306 dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
@@ -453,7 +458,7 @@ static int cs4270_hw_params(struct snd_pcm_substream *substream,
453} 458}
454 459
455/** 460/**
456 * cs4270_mute - enable/disable the CS4270 external mute 461 * cs4270_dai_mute - enable/disable the CS4270 external mute
457 * @dai: the SOC DAI 462 * @dai: the SOC DAI
458 * @mute: 0 = disable mute, 1 = enable mute 463 * @mute: 0 = disable mute, 1 = enable mute
459 * 464 *
@@ -462,21 +467,52 @@ static int cs4270_hw_params(struct snd_pcm_substream *substream,
462 * board does not have the MUTEA or MUTEB pins connected to such circuitry, 467 * board does not have the MUTEA or MUTEB pins connected to such circuitry,
463 * then this function will do nothing. 468 * then this function will do nothing.
464 */ 469 */
465static int cs4270_mute(struct snd_soc_dai *dai, int mute) 470static int cs4270_dai_mute(struct snd_soc_dai *dai, int mute)
466{ 471{
467 struct snd_soc_codec *codec = dai->codec; 472 struct snd_soc_codec *codec = dai->codec;
473 struct cs4270_private *cs4270 = codec->private_data;
468 int reg6; 474 int reg6;
469 475
470 reg6 = snd_soc_read(codec, CS4270_MUTE); 476 reg6 = snd_soc_read(codec, CS4270_MUTE);
471 477
472 if (mute) 478 if (mute)
473 reg6 |= CS4270_MUTE_DAC_A | CS4270_MUTE_DAC_B; 479 reg6 |= CS4270_MUTE_DAC_A | CS4270_MUTE_DAC_B;
474 else 480 else {
475 reg6 &= ~(CS4270_MUTE_DAC_A | CS4270_MUTE_DAC_B); 481 reg6 &= ~(CS4270_MUTE_DAC_A | CS4270_MUTE_DAC_B);
482 reg6 |= cs4270->manual_mute;
483 }
476 484
477 return snd_soc_write(codec, CS4270_MUTE, reg6); 485 return snd_soc_write(codec, CS4270_MUTE, reg6);
478} 486}
479 487
488/**
489 * cs4270_soc_put_mute - put callback for the 'Master Playback switch'
490 * alsa control.
491 * @kcontrol: mixer control
492 * @ucontrol: control element information
493 *
494 * This function basically passes the arguments on to the generic
495 * snd_soc_put_volsw() function and saves the mute information in
496 * our private data structure. This is because we want to prevent
497 * cs4270_dai_mute() neglecting the user's decision to manually
498 * mute the codec's output.
499 *
500 * Returns 0 for success.
501 */
502static int cs4270_soc_put_mute(struct snd_kcontrol *kcontrol,
503 struct snd_ctl_elem_value *ucontrol)
504{
505 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
506 struct cs4270_private *cs4270 = codec->private_data;
507 int left = !ucontrol->value.integer.value[0];
508 int right = !ucontrol->value.integer.value[1];
509
510 cs4270->manual_mute = (left ? CS4270_MUTE_DAC_A : 0) |
511 (right ? CS4270_MUTE_DAC_B : 0);
512
513 return snd_soc_put_volsw(kcontrol, ucontrol);
514}
515
480/* A list of non-DAPM controls that the CS4270 supports */ 516/* A list of non-DAPM controls that the CS4270 supports */
481static const struct snd_kcontrol_new cs4270_snd_controls[] = { 517static const struct snd_kcontrol_new cs4270_snd_controls[] = {
482 SOC_DOUBLE_R("Master Playback Volume", 518 SOC_DOUBLE_R("Master Playback Volume",
@@ -486,7 +522,9 @@ static const struct snd_kcontrol_new cs4270_snd_controls[] = {
486 SOC_SINGLE("Zero Cross Switch", CS4270_TRANS, 5, 1, 0), 522 SOC_SINGLE("Zero Cross Switch", CS4270_TRANS, 5, 1, 0),
487 SOC_SINGLE("Popguard Switch", CS4270_MODE, 0, 1, 1), 523 SOC_SINGLE("Popguard Switch", CS4270_MODE, 0, 1, 1),
488 SOC_SINGLE("Auto-Mute Switch", CS4270_MUTE, 5, 1, 0), 524 SOC_SINGLE("Auto-Mute Switch", CS4270_MUTE, 5, 1, 0),
489 SOC_DOUBLE("Master Capture Switch", CS4270_MUTE, 3, 4, 1, 0) 525 SOC_DOUBLE("Master Capture Switch", CS4270_MUTE, 3, 4, 1, 1),
526 SOC_DOUBLE_EXT("Master Playback Switch", CS4270_MUTE, 0, 1, 1, 1,
527 snd_soc_get_volsw, cs4270_soc_put_mute),
490}; 528};
491 529
492/* 530/*
@@ -506,7 +544,7 @@ static struct snd_soc_dai_ops cs4270_dai_ops = {
506 .hw_params = cs4270_hw_params, 544 .hw_params = cs4270_hw_params,
507 .set_sysclk = cs4270_set_dai_sysclk, 545 .set_sysclk = cs4270_set_dai_sysclk,
508 .set_fmt = cs4270_set_dai_fmt, 546 .set_fmt = cs4270_set_dai_fmt,
509 .digital_mute = cs4270_mute, 547 .digital_mute = cs4270_dai_mute,
510}; 548};
511 549
512struct snd_soc_dai cs4270_dai = { 550struct snd_soc_dai cs4270_dai = {
@@ -753,6 +791,57 @@ static struct i2c_device_id cs4270_id[] = {
753}; 791};
754MODULE_DEVICE_TABLE(i2c, cs4270_id); 792MODULE_DEVICE_TABLE(i2c, cs4270_id);
755 793
#ifdef CONFIG_PM

/* This suspend/resume implementation can handle both - a simple standby
 * where the codec remains powered, and a full suspend, where the voltage
 * domain the codec is connected to is teared down and/or any other hardware
 * reset condition is asserted.
 *
 * The codec's own power saving features are enabled in the suspend callback,
 * and all registers are written back to the hardware when resuming.
 */

static int cs4270_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
{
	struct cs4270_private *cs4270 = i2c_get_clientdata(client);
	struct snd_soc_codec *codec = &cs4270->codec;
	int pwrctl;

	/* Assert every power-down bit; the pre-suspend value stays in the
	 * register cache and is restored on resume. */
	pwrctl = snd_soc_read(codec, CS4270_PWRCTL);
	return snd_soc_write(codec, CS4270_PWRCTL,
			     pwrctl | CS4270_PWRCTL_PDN_ALL);
}

static int cs4270_i2c_resume(struct i2c_client *client)
{
	struct cs4270_private *cs4270 = i2c_get_clientdata(client);
	struct snd_soc_codec *codec = &cs4270->codec;
	int reg;

	/* In case the device was put to hard reset during sleep, we need to
	 * wait 500ns here before any I2C communication. */
	ndelay(500);

	/* first restore the entire register cache ... */
	for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
		u8 cached = snd_soc_read(codec, reg);

		/* i2c_smbus_write_byte_data() returns nonzero on failure */
		if (i2c_smbus_write_byte_data(client, reg, cached)) {
			dev_err(codec->dev, "i2c write failed\n");
			return -EIO;
		}
	}

	/* ... then disable the power-down bits */
	reg = snd_soc_read(codec, CS4270_PWRCTL);
	return snd_soc_write(codec, CS4270_PWRCTL,
			     reg & ~CS4270_PWRCTL_PDN_ALL);
}
#else
#define cs4270_i2c_suspend NULL
#define cs4270_i2c_resume NULL
#endif /* CONFIG_PM */
844
756/* 845/*
757 * cs4270_i2c_driver - I2C device identification 846 * cs4270_i2c_driver - I2C device identification
758 * 847 *
@@ -767,6 +856,8 @@ static struct i2c_driver cs4270_i2c_driver = {
767 .id_table = cs4270_id, 856 .id_table = cs4270_id,
768 .probe = cs4270_i2c_probe, 857 .probe = cs4270_i2c_probe,
769 .remove = cs4270_i2c_remove, 858 .remove = cs4270_i2c_remove,
859 .suspend = cs4270_i2c_suspend,
860 .resume = cs4270_i2c_resume,
770}; 861};
771 862
772/* 863/*
diff --git a/sound/soc/codecs/spdif_transciever.c b/sound/soc/codecs/spdif_transciever.c
new file mode 100644
index 000000000000..218b33adad90
--- /dev/null
+++ b/sound/soc/codecs/spdif_transciever.c
@@ -0,0 +1,71 @@
1/*
2 * ALSA SoC SPDIF DIT driver
3 *
4 * This driver is used by controllers which can operate in DIT (SPDI/F) where
5 * no codec is needed. This file provides stub codec that can be used
6 * in these configurations. TI DaVinci Audio controller uses this driver.
7 *
8 * Author: Steve Chen, <schen@mvista.com>
9 * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
10 * Copyright: (C) 2009 Texas Instruments, India
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <sound/soc.h>
20#include <sound/pcm.h>
21
22#include "spdif_transciever.h"
23
24#define STUB_RATES SNDRV_PCM_RATE_8000_96000
25#define STUB_FORMATS SNDRV_PCM_FMTBIT_S16_LE
26
27struct snd_soc_dai dit_stub_dai = {
28 .name = "DIT",
29 .playback = {
30 .stream_name = "Playback",
31 .channels_min = 1,
32 .channels_max = 384,
33 .rates = STUB_RATES,
34 .formats = STUB_FORMATS,
35 },
36};
37
38static int spdif_dit_probe(struct platform_device *pdev)
39{
40 dit_stub_dai.dev = &pdev->dev;
41 return snd_soc_register_dai(&dit_stub_dai);
42}
43
44static int spdif_dit_remove(struct platform_device *pdev)
45{
46 snd_soc_unregister_dai(&dit_stub_dai);
47 return 0;
48}
49
50static struct platform_driver spdif_dit_driver = {
51 .probe = spdif_dit_probe,
52 .remove = spdif_dit_remove,
53 .driver = {
54 .name = "spdif-dit",
55 .owner = THIS_MODULE,
56 },
57};
58
59static int __init dit_modinit(void)
60{
61 return platform_driver_register(&spdif_dit_driver);
62}
63
64static void __exit dit_exit(void)
65{
66 platform_driver_unregister(&spdif_dit_driver);
67}
68
69module_init(dit_modinit);
70module_exit(dit_exit);
71
diff --git a/sound/soc/codecs/spdif_transciever.h b/sound/soc/codecs/spdif_transciever.h
new file mode 100644
index 000000000000..296f2eb6c4ef
--- /dev/null
+++ b/sound/soc/codecs/spdif_transciever.h
@@ -0,0 +1,17 @@
1/*
2 * ALSA SoC DIT/DIR driver header
3 *
4 * Author: Steve Chen, <schen@mvista.com>
5 * Copyright: (C) 2008 MontaVista Software, Inc., <source@mvista.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef CODEC_STUBS_H
13#define CODEC_STUBS_H
14
15extern struct snd_soc_dai dit_stub_dai;
16
17#endif /* CODEC_STUBS_H */
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 87f606c76822..1fc4c8e0899c 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -336,15 +336,17 @@ static int ssm2602_startup(struct snd_pcm_substream *substream,
336 master_runtime->sample_bits, 336 master_runtime->sample_bits,
337 master_runtime->rate); 337 master_runtime->rate);
338 338
339 snd_pcm_hw_constraint_minmax(substream->runtime, 339 if (master_runtime->rate != 0)
340 SNDRV_PCM_HW_PARAM_RATE, 340 snd_pcm_hw_constraint_minmax(substream->runtime,
341 master_runtime->rate, 341 SNDRV_PCM_HW_PARAM_RATE,
342 master_runtime->rate); 342 master_runtime->rate,
343 343 master_runtime->rate);
344 snd_pcm_hw_constraint_minmax(substream->runtime, 344
345 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 345 if (master_runtime->sample_bits != 0)
346 master_runtime->sample_bits, 346 snd_pcm_hw_constraint_minmax(substream->runtime,
347 master_runtime->sample_bits); 347 SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
348 master_runtime->sample_bits,
349 master_runtime->sample_bits);
348 350
349 ssm2602->slave_substream = substream; 351 ssm2602->slave_substream = substream;
350 } else 352 } else
@@ -372,6 +374,11 @@ static void ssm2602_shutdown(struct snd_pcm_substream *substream,
372 struct snd_soc_device *socdev = rtd->socdev; 374 struct snd_soc_device *socdev = rtd->socdev;
373 struct snd_soc_codec *codec = socdev->card->codec; 375 struct snd_soc_codec *codec = socdev->card->codec;
374 struct ssm2602_priv *ssm2602 = codec->private_data; 376 struct ssm2602_priv *ssm2602 = codec->private_data;
377
378 if (ssm2602->master_substream == substream)
379 ssm2602->master_substream = ssm2602->slave_substream;
380
381 ssm2602->slave_substream = NULL;
375 /* deactivate */ 382 /* deactivate */
376 if (!codec->active) 383 if (!codec->active)
377 ssm2602_write(codec, SSM2602_ACTIVE, 0); 384 ssm2602_write(codec, SSM2602_ACTIVE, 0);
@@ -497,11 +504,9 @@ static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
497 return 0; 504 return 0;
498} 505}
499 506
500#define SSM2602_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ 507#define SSM2602_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_32000 |\
501 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ 508 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
502 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\ 509 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
503 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
504 SNDRV_PCM_RATE_96000)
505 510
506#define SSM2602_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ 511#define SSM2602_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
507 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) 512 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
new file mode 100644
index 000000000000..8ad4b7b3e3ba
--- /dev/null
+++ b/sound/soc/codecs/stac9766.c
@@ -0,0 +1,463 @@
1/*
2 * stac9766.c -- ALSA SoC STAC9766 codec support
3 *
4 * Copyright 2009 Jon Smirl, Digispeaker
5 * Author: Jon Smirl <jonsmirl@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * Features:-
13 *
14 * o Support for AC97 Codec, S/PDIF
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/device.h>
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/ac97_codec.h>
23#include <sound/initval.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include <sound/tlv.h>
27#include <sound/soc-of-simple.h>
28
29#include "stac9766.h"
30
31#define STAC9766_VERSION "0.10"
32
33/*
34 * STAC9766 register cache
35 */
36static const u16 stac9766_reg[] = {
37 0x6A90, 0x8000, 0x8000, 0x8000, /* 6 */
38 0x0000, 0x0000, 0x8008, 0x8008, /* e */
39 0x8808, 0x8808, 0x8808, 0x8808, /* 16 */
40 0x8808, 0x0000, 0x8000, 0x0000, /* 1e */
41 0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
42 0x0a05, 0x0400, 0xbb80, 0x0000, /* 2e */
43 0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
44 0x0000, 0x2000, 0x0000, 0x0100, /* 3e */
45 0x0000, 0x0000, 0x0080, 0x0000, /* 46 */
46 0x0000, 0x0000, 0x0003, 0xffff, /* 4e */
47 0x0000, 0x0000, 0x0000, 0x0000, /* 56 */
48 0x4000, 0x0000, 0x0000, 0x0000, /* 5e */
49 0x1201, 0xFFFF, 0xFFFF, 0x0000, /* 66 */
50 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
51 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
52 0x0000, 0x0000, 0x0000, 0x0000, /* 7e */
53};
54
55static const char *stac9766_record_mux[] = {"Mic", "CD", "Video", "AUX",
56 "Line", "Stereo Mix", "Mono Mix", "Phone"};
57static const char *stac9766_mono_mux[] = {"Mix", "Mic"};
58static const char *stac9766_mic_mux[] = {"Mic1", "Mic2"};
59static const char *stac9766_SPDIF_mux[] = {"PCM", "ADC Record"};
60static const char *stac9766_popbypass_mux[] = {"Normal", "Bypass Mixer"};
61static const char *stac9766_record_all_mux[] = {"All analog",
62 "Analog plus DAC"};
63static const char *stac9766_boost1[] = {"0dB", "10dB"};
64static const char *stac9766_boost2[] = {"0dB", "20dB"};
65static const char *stac9766_stereo_mic[] = {"Off", "On"};
66
67static const struct soc_enum stac9766_record_enum =
68 SOC_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 8, stac9766_record_mux);
69static const struct soc_enum stac9766_mono_enum =
70 SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 9, 2, stac9766_mono_mux);
71static const struct soc_enum stac9766_mic_enum =
72 SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 8, 2, stac9766_mic_mux);
73static const struct soc_enum stac9766_SPDIF_enum =
74 SOC_ENUM_SINGLE(AC97_STAC_DA_CONTROL, 1, 2, stac9766_SPDIF_mux);
75static const struct soc_enum stac9766_popbypass_enum =
76 SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, stac9766_popbypass_mux);
77static const struct soc_enum stac9766_record_all_enum =
78 SOC_ENUM_SINGLE(AC97_STAC_ANALOG_SPECIAL, 12, 2,
79 stac9766_record_all_mux);
80static const struct soc_enum stac9766_boost1_enum =
81 SOC_ENUM_SINGLE(AC97_MIC, 6, 2, stac9766_boost1); /* 0/10dB */
82static const struct soc_enum stac9766_boost2_enum =
83 SOC_ENUM_SINGLE(AC97_STAC_ANALOG_SPECIAL, 2, 2, stac9766_boost2); /* 0/20dB */
84static const struct soc_enum stac9766_stereo_mic_enum =
85 SOC_ENUM_SINGLE(AC97_STAC_STEREO_MIC, 2, 1, stac9766_stereo_mic);
86
87static const DECLARE_TLV_DB_LINEAR(master_tlv, -4600, 0);
88static const DECLARE_TLV_DB_LINEAR(record_tlv, 0, 2250);
89static const DECLARE_TLV_DB_LINEAR(beep_tlv, -4500, 0);
90static const DECLARE_TLV_DB_LINEAR(mix_tlv, -3450, 1200);
91
92static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
93 SOC_DOUBLE_TLV("Speaker Volume", AC97_MASTER, 8, 0, 31, 1, master_tlv),
94 SOC_SINGLE("Speaker Switch", AC97_MASTER, 15, 1, 1),
95 SOC_DOUBLE_TLV("Headphone Volume", AC97_HEADPHONE, 8, 0, 31, 1,
96 master_tlv),
97 SOC_SINGLE("Headphone Switch", AC97_HEADPHONE, 15, 1, 1),
98 SOC_SINGLE_TLV("Mono Out Volume", AC97_MASTER_MONO, 0, 31, 1,
99 master_tlv),
100 SOC_SINGLE("Mono Out Switch", AC97_MASTER_MONO, 15, 1, 1),
101
102 SOC_DOUBLE_TLV("Record Volume", AC97_REC_GAIN, 8, 0, 15, 0, record_tlv),
103 SOC_SINGLE("Record Switch", AC97_REC_GAIN, 15, 1, 1),
104
105
106 SOC_SINGLE_TLV("Beep Volume", AC97_PC_BEEP, 1, 15, 1, beep_tlv),
107 SOC_SINGLE("Beep Switch", AC97_PC_BEEP, 15, 1, 1),
108 SOC_SINGLE("Beep Frequency", AC97_PC_BEEP, 5, 127, 1),
109 SOC_SINGLE_TLV("Phone Volume", AC97_PHONE, 0, 31, 1, mix_tlv),
110 SOC_SINGLE("Phone Switch", AC97_PHONE, 15, 1, 1),
111
112 SOC_ENUM("Mic Boost1", stac9766_boost1_enum),
113 SOC_ENUM("Mic Boost2", stac9766_boost2_enum),
114 SOC_SINGLE_TLV("Mic Volume", AC97_MIC, 0, 31, 1, mix_tlv),
115 SOC_SINGLE("Mic Switch", AC97_MIC, 15, 1, 1),
116 SOC_ENUM("Stereo Mic", stac9766_stereo_mic_enum),
117
118 SOC_DOUBLE_TLV("Line Volume", AC97_LINE, 8, 0, 31, 1, mix_tlv),
119 SOC_SINGLE("Line Switch", AC97_LINE, 15, 1, 1),
120 SOC_DOUBLE_TLV("CD Volume", AC97_CD, 8, 0, 31, 1, mix_tlv),
121 SOC_SINGLE("CD Switch", AC97_CD, 15, 1, 1),
122 SOC_DOUBLE_TLV("AUX Volume", AC97_AUX, 8, 0, 31, 1, mix_tlv),
123 SOC_SINGLE("AUX Switch", AC97_AUX, 15, 1, 1),
124 SOC_DOUBLE_TLV("Video Volume", AC97_VIDEO, 8, 0, 31, 1, mix_tlv),
125 SOC_SINGLE("Video Switch", AC97_VIDEO, 15, 1, 1),
126
127 SOC_DOUBLE_TLV("DAC Volume", AC97_PCM, 8, 0, 31, 1, mix_tlv),
128 SOC_SINGLE("DAC Switch", AC97_PCM, 15, 1, 1),
129 SOC_SINGLE("Loopback Test Switch", AC97_GENERAL_PURPOSE, 7, 1, 0),
130 SOC_SINGLE("3D Volume", AC97_3D_CONTROL, 3, 2, 1),
131 SOC_SINGLE("3D Switch", AC97_GENERAL_PURPOSE, 13, 1, 0),
132
133 SOC_ENUM("SPDIF Mux", stac9766_SPDIF_enum),
134 SOC_ENUM("Mic1/2 Mux", stac9766_mic_enum),
135 SOC_ENUM("Record All Mux", stac9766_record_all_enum),
136 SOC_ENUM("Record Mux", stac9766_record_enum),
137 SOC_ENUM("Mono Mux", stac9766_mono_enum),
138 SOC_ENUM("Pop Bypass Mux", stac9766_popbypass_enum),
139};
140
141static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
142 unsigned int val)
143{
144 u16 *cache = codec->reg_cache;
145
146 if (reg > AC97_STAC_PAGE0) {
147 stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
148 soc_ac97_ops.write(codec->ac97, reg, val);
149 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
150 return 0;
151 }
152 if (reg / 2 > ARRAY_SIZE(stac9766_reg))
153 return -EIO;
154
155 soc_ac97_ops.write(codec->ac97, reg, val);
156 cache[reg / 2] = val;
157 return 0;
158}
159
160static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
161 unsigned int reg)
162{
163 u16 val = 0, *cache = codec->reg_cache;
164
165 if (reg > AC97_STAC_PAGE0) {
166 stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
167 val = soc_ac97_ops.read(codec->ac97, reg - AC97_STAC_PAGE0);
168 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
169 return val;
170 }
171 if (reg / 2 > ARRAY_SIZE(stac9766_reg))
172 return -EIO;
173
174 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
175 reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 ||
176 reg == AC97_VENDOR_ID2) {
177
178 val = soc_ac97_ops.read(codec->ac97, reg);
179 return val;
180 }
181 return cache[reg / 2];
182}
183
184static int ac97_analog_prepare(struct snd_pcm_substream *substream,
185 struct snd_soc_dai *dai)
186{
187 struct snd_soc_codec *codec = dai->codec;
188 struct snd_pcm_runtime *runtime = substream->runtime;
189 unsigned short reg, vra;
190
191 vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
192
193 vra |= 0x1; /* enable variable rate audio */
194
195 stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
196
197 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
198 reg = AC97_PCM_FRONT_DAC_RATE;
199 else
200 reg = AC97_PCM_LR_ADC_RATE;
201
202 return stac9766_ac97_write(codec, reg, runtime->rate);
203}
204
205static int ac97_digital_prepare(struct snd_pcm_substream *substream,
206 struct snd_soc_dai *dai)
207{
208 struct snd_soc_codec *codec = dai->codec;
209 struct snd_pcm_runtime *runtime = substream->runtime;
210 unsigned short reg, vra;
211
212 stac9766_ac97_write(codec, AC97_SPDIF, 0x2002);
213
214 vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
215 vra |= 0x5; /* Enable VRA and SPDIF out */
216
217 stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
218
219 reg = AC97_PCM_FRONT_DAC_RATE;
220
221 return stac9766_ac97_write(codec, reg, runtime->rate);
222}
223
224static int ac97_digital_trigger(struct snd_pcm_substream *substream,
225 int cmd, struct snd_soc_dai *dai)
226{
227 struct snd_soc_codec *codec = dai->codec;
228 unsigned short vra;
229
230 switch (cmd) {
231 case SNDRV_PCM_TRIGGER_STOP:
232 vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
233 vra &= !0x04;
234 stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
235 break;
236 }
237 return 0;
238}
239
240static int stac9766_set_bias_level(struct snd_soc_codec *codec,
241 enum snd_soc_bias_level level)
242{
243 switch (level) {
244 case SND_SOC_BIAS_ON: /* full On */
245 case SND_SOC_BIAS_PREPARE: /* partial On */
246 case SND_SOC_BIAS_STANDBY: /* Off, with power */
247 stac9766_ac97_write(codec, AC97_POWERDOWN, 0x0000);
248 break;
249 case SND_SOC_BIAS_OFF: /* Off, without power */
250 /* disable everything including AC link */
251 stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
252 break;
253 }
254 codec->bias_level = level;
255 return 0;
256}
257
258static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
259{
260 if (try_warm && soc_ac97_ops.warm_reset) {
261 soc_ac97_ops.warm_reset(codec->ac97);
262 if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
263 return 1;
264 }
265
266 soc_ac97_ops.reset(codec->ac97);
267 if (soc_ac97_ops.warm_reset)
268 soc_ac97_ops.warm_reset(codec->ac97);
269 if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
270 return -EIO;
271 return 0;
272}
273
274static int stac9766_codec_suspend(struct platform_device *pdev,
275 pm_message_t state)
276{
277 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
278 struct snd_soc_codec *codec = socdev->card->codec;
279
280 stac9766_set_bias_level(codec, SND_SOC_BIAS_OFF);
281 return 0;
282}
283
284static int stac9766_codec_resume(struct platform_device *pdev)
285{
286 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
287 struct snd_soc_codec *codec = socdev->card->codec;
288 u16 id, reset;
289
290 reset = 0;
291 /* give the codec an AC97 warm reset to start the link */
292reset:
293 if (reset > 5) {
294 printk(KERN_ERR "stac9766 failed to resume");
295 return -EIO;
296 }
297 codec->ac97->bus->ops->warm_reset(codec->ac97);
298 id = soc_ac97_ops.read(codec->ac97, AC97_VENDOR_ID2);
299 if (id != 0x4c13) {
300 stac9766_reset(codec, 0);
301 reset++;
302 goto reset;
303 }
304 stac9766_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
305
306 if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
307 stac9766_set_bias_level(codec, SND_SOC_BIAS_ON);
308
309 return 0;
310}
311
312static struct snd_soc_dai_ops stac9766_dai_ops_analog = {
313 .prepare = ac97_analog_prepare,
314};
315
316static struct snd_soc_dai_ops stac9766_dai_ops_digital = {
317 .prepare = ac97_digital_prepare,
318 .trigger = ac97_digital_trigger,
319};
320
321struct snd_soc_dai stac9766_dai[] = {
322{
323 .name = "stac9766 analog",
324 .id = 0,
325 .ac97_control = 1,
326
327 /* stream cababilities */
328 .playback = {
329 .stream_name = "stac9766 analog",
330 .channels_min = 1,
331 .channels_max = 2,
332 .rates = SNDRV_PCM_RATE_8000_48000,
333 .formats = SND_SOC_STD_AC97_FMTS,
334 },
335 .capture = {
336 .stream_name = "stac9766 analog",
337 .channels_min = 1,
338 .channels_max = 2,
339 .rates = SNDRV_PCM_RATE_8000_48000,
340 .formats = SND_SOC_STD_AC97_FMTS,
341 },
342 /* alsa ops */
343 .ops = &stac9766_dai_ops_analog,
344},
345{
346 .name = "stac9766 IEC958",
347 .id = 1,
348 .ac97_control = 1,
349
350 /* stream cababilities */
351 .playback = {
352 .stream_name = "stac9766 IEC958",
353 .channels_min = 1,
354 .channels_max = 2,
355 .rates = SNDRV_PCM_RATE_32000 | \
356 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
357 .formats = SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE,
358 },
359 /* alsa ops */
360 .ops = &stac9766_dai_ops_digital,
361}
362};
363EXPORT_SYMBOL_GPL(stac9766_dai);
364
365static int stac9766_codec_probe(struct platform_device *pdev)
366{
367 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
368 struct snd_soc_codec *codec;
369 int ret = 0;
370
371 printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
372
373 socdev->card->codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
374 if (socdev->card->codec == NULL)
375 return -ENOMEM;
376 codec = socdev->card->codec;
377 mutex_init(&codec->mutex);
378
379 codec->reg_cache = kmemdup(stac9766_reg, sizeof(stac9766_reg),
380 GFP_KERNEL);
381 if (codec->reg_cache == NULL) {
382 ret = -ENOMEM;
383 goto cache_err;
384 }
385 codec->reg_cache_size = sizeof(stac9766_reg);
386 codec->reg_cache_step = 2;
387
388 codec->name = "STAC9766";
389 codec->owner = THIS_MODULE;
390 codec->dai = stac9766_dai;
391 codec->num_dai = ARRAY_SIZE(stac9766_dai);
392 codec->write = stac9766_ac97_write;
393 codec->read = stac9766_ac97_read;
394 codec->set_bias_level = stac9766_set_bias_level;
395 INIT_LIST_HEAD(&codec->dapm_widgets);
396 INIT_LIST_HEAD(&codec->dapm_paths);
397
398 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
399 if (ret < 0)
400 goto codec_err;
401
402 /* register pcms */
403 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
404 if (ret < 0)
405 goto pcm_err;
406
407 /* do a cold reset for the controller and then try
408 * a warm reset followed by an optional cold reset for codec */
409 stac9766_reset(codec, 0);
410 ret = stac9766_reset(codec, 1);
411 if (ret < 0) {
412 printk(KERN_ERR "Failed to reset STAC9766: AC97 link error\n");
413 goto reset_err;
414 }
415
416 stac9766_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
417
418 snd_soc_add_controls(codec, stac9766_snd_ac97_controls,
419 ARRAY_SIZE(stac9766_snd_ac97_controls));
420
421 ret = snd_soc_init_card(socdev);
422 if (ret < 0)
423 goto reset_err;
424 return 0;
425
426reset_err:
427 snd_soc_free_pcms(socdev);
428pcm_err:
429 snd_soc_free_ac97_codec(codec);
430codec_err:
431 kfree(codec->private_data);
432cache_err:
433 kfree(socdev->card->codec);
434 socdev->card->codec = NULL;
435 return ret;
436}
437
438static int stac9766_codec_remove(struct platform_device *pdev)
439{
440 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
441 struct snd_soc_codec *codec = socdev->card->codec;
442
443 if (codec == NULL)
444 return 0;
445
446 snd_soc_free_pcms(socdev);
447 snd_soc_free_ac97_codec(codec);
448 kfree(codec->reg_cache);
449 kfree(codec);
450 return 0;
451}
452
453struct snd_soc_codec_device soc_codec_dev_stac9766 = {
454 .probe = stac9766_codec_probe,
455 .remove = stac9766_codec_remove,
456 .suspend = stac9766_codec_suspend,
457 .resume = stac9766_codec_resume,
458};
459EXPORT_SYMBOL_GPL(soc_codec_dev_stac9766);
460
461MODULE_DESCRIPTION("ASoC stac9766 driver");
462MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
463MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/stac9766.h b/sound/soc/codecs/stac9766.h
new file mode 100644
index 000000000000..65642eb8393e
--- /dev/null
+++ b/sound/soc/codecs/stac9766.h
@@ -0,0 +1,21 @@
1/*
2 * stac9766.h -- STAC9766 Soc Audio driver
3 */
4
5#ifndef _STAC9766_H
6#define _STAC9766_H
7
8#define AC97_STAC_PAGE0 0x1000
9#define AC97_STAC_DA_CONTROL (AC97_STAC_PAGE0 | 0x6A)
10#define AC97_STAC_ANALOG_SPECIAL (AC97_STAC_PAGE0 | 0x6E)
11#define AC97_STAC_STEREO_MIC 0x78
12
13/* STAC9766 DAI ID's */
14#define STAC9766_DAI_AC97_ANALOG 0
15#define STAC9766_DAI_AC97_DIGITAL 1
16
17extern struct snd_soc_dai stac9766_dai[];
18extern struct snd_soc_codec_device soc_codec_dev_stac9766;
19
20
21#endif
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index c3f4afb5d017..0b8dcb5cd729 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -86,7 +86,7 @@ static int tlv320aic23_write(struct snd_soc_codec *codec, unsigned int reg,
86 */ 86 */
87 87
88 if ((reg < 0 || reg > 9) && (reg != 15)) { 88 if ((reg < 0 || reg > 9) && (reg != 15)) {
89 printk(KERN_WARNING "%s Invalid register R%d\n", __func__, reg); 89 printk(KERN_WARNING "%s Invalid register R%u\n", __func__, reg);
90 return -1; 90 return -1;
91 } 91 }
92 92
@@ -98,7 +98,7 @@ static int tlv320aic23_write(struct snd_soc_codec *codec, unsigned int reg,
98 if (codec->hw_write(codec->control_data, data, 2) == 2) 98 if (codec->hw_write(codec->control_data, data, 2) == 2)
99 return 0; 99 return 0;
100 100
101 printk(KERN_ERR "%s cannot write %03x to register R%d\n", __func__, 101 printk(KERN_ERR "%s cannot write %03x to register R%u\n", __func__,
102 value, reg); 102 value, reg);
103 103
104 return -EIO; 104 return -EIO;
@@ -273,14 +273,14 @@ static const unsigned short sr_valid_mask[] = {
273 * Every divisor is a factor of 11*12 273 * Every divisor is a factor of 11*12
274 */ 274 */
275#define SR_MULT (11*12) 275#define SR_MULT (11*12)
276#define A(x) (x) ? (SR_MULT/x) : 0 276#define A(x) (SR_MULT/x)
277static const unsigned char sr_adc_mult_table[] = { 277static const unsigned char sr_adc_mult_table[] = {
278 A(2), A(2), A(12), A(12), A(0), A(0), A(3), A(1), 278 A(2), A(2), A(12), A(12), 0, 0, A(3), A(1),
279 A(2), A(2), A(11), A(11), A(0), A(0), A(0), A(1) 279 A(2), A(2), A(11), A(11), 0, 0, 0, A(1)
280}; 280};
281static const unsigned char sr_dac_mult_table[] = { 281static const unsigned char sr_dac_mult_table[] = {
282 A(2), A(12), A(2), A(12), A(0), A(0), A(3), A(1), 282 A(2), A(12), A(2), A(12), 0, 0, A(3), A(1),
283 A(2), A(11), A(2), A(11), A(0), A(0), A(0), A(1) 283 A(2), A(11), A(2), A(11), 0, 0, 0, A(1)
284}; 284};
285 285
286static unsigned get_score(int adc, int adc_l, int adc_h, int need_adc, 286static unsigned get_score(int adc, int adc_l, int adc_h, int need_adc,
@@ -523,6 +523,8 @@ static int tlv320aic23_set_dai_fmt(struct snd_soc_dai *codec_dai,
523 case SND_SOC_DAIFMT_I2S: 523 case SND_SOC_DAIFMT_I2S:
524 iface_reg |= TLV320AIC23_FOR_I2S; 524 iface_reg |= TLV320AIC23_FOR_I2S;
525 break; 525 break;
526 case SND_SOC_DAIFMT_DSP_A:
527 iface_reg |= TLV320AIC23_LRP_ON;
526 case SND_SOC_DAIFMT_DSP_B: 528 case SND_SOC_DAIFMT_DSP_B:
527 iface_reg |= TLV320AIC23_FOR_DSP; 529 iface_reg |= TLV320AIC23_FOR_DSP;
528 break; 530 break;
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index df7c8c281d2f..4dbb853eef5a 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -115,6 +115,7 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
115 0x00, /* REG_VIBRA_PWM_SET (0x47) */ 115 0x00, /* REG_VIBRA_PWM_SET (0x47) */
116 0x00, /* REG_ANAMIC_GAIN (0x48) */ 116 0x00, /* REG_ANAMIC_GAIN (0x48) */
117 0x00, /* REG_MISC_SET_2 (0x49) */ 117 0x00, /* REG_MISC_SET_2 (0x49) */
118 0x00, /* REG_SW_SHADOW (0x4A) - Shadow, non HW register */
118}; 119};
119 120
120/* codec private data */ 121/* codec private data */
@@ -125,6 +126,17 @@ struct twl4030_priv {
125 126
126 struct snd_pcm_substream *master_substream; 127 struct snd_pcm_substream *master_substream;
127 struct snd_pcm_substream *slave_substream; 128 struct snd_pcm_substream *slave_substream;
129
130 unsigned int configured;
131 unsigned int rate;
132 unsigned int sample_bits;
133 unsigned int channels;
134
135 unsigned int sysclk;
136
137 /* Headset output state handling */
138 unsigned int hsl_enabled;
139 unsigned int hsr_enabled;
128}; 140};
129 141
130/* 142/*
@@ -161,7 +173,11 @@ static int twl4030_write(struct snd_soc_codec *codec,
161 unsigned int reg, unsigned int value) 173 unsigned int reg, unsigned int value)
162{ 174{
163 twl4030_write_reg_cache(codec, reg, value); 175 twl4030_write_reg_cache(codec, reg, value);
164 return twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg); 176 if (likely(reg < TWL4030_REG_SW_SHADOW))
177 return twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
178 reg);
179 else
180 return 0;
165} 181}
166 182
167static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable) 183static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
@@ -188,6 +204,7 @@ static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
188 204
189static void twl4030_init_chip(struct snd_soc_codec *codec) 205static void twl4030_init_chip(struct snd_soc_codec *codec)
190{ 206{
207 u8 *cache = codec->reg_cache;
191 int i; 208 int i;
192 209
193 /* clear CODECPDZ prior to setting register defaults */ 210 /* clear CODECPDZ prior to setting register defaults */
@@ -195,7 +212,7 @@ static void twl4030_init_chip(struct snd_soc_codec *codec)
195 212
196 /* set all audio section registers to reasonable defaults */ 213 /* set all audio section registers to reasonable defaults */
197 for (i = TWL4030_REG_OPTION; i <= TWL4030_REG_MISC_SET_2; i++) 214 for (i = TWL4030_REG_OPTION; i <= TWL4030_REG_MISC_SET_2; i++)
198 twl4030_write(codec, i, twl4030_reg[i]); 215 twl4030_write(codec, i, cache[i]);
199 216
200} 217}
201 218
@@ -232,7 +249,7 @@ static void twl4030_codec_mute(struct snd_soc_codec *codec, int mute)
232 TWL4030_REG_PRECKL_CTL); 249 TWL4030_REG_PRECKL_CTL);
233 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL); 250 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL);
234 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 251 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
235 reg_val & (~TWL4030_PRECKL_GAIN), 252 reg_val & (~TWL4030_PRECKR_GAIN),
236 TWL4030_REG_PRECKR_CTL); 253 TWL4030_REG_PRECKR_CTL);
237 254
238 /* Disable PLL */ 255 /* Disable PLL */
@@ -316,104 +333,60 @@ static void twl4030_power_down(struct snd_soc_codec *codec)
316} 333}
317 334
318/* Earpiece */ 335/* Earpiece */
319static const char *twl4030_earpiece_texts[] = 336static const struct snd_kcontrol_new twl4030_dapm_earpiece_controls[] = {
320 {"Off", "DACL1", "DACL2", "DACR1"}; 337 SOC_DAPM_SINGLE("Voice", TWL4030_REG_EAR_CTL, 0, 1, 0),
321 338 SOC_DAPM_SINGLE("AudioL1", TWL4030_REG_EAR_CTL, 1, 1, 0),
322static const unsigned int twl4030_earpiece_values[] = 339 SOC_DAPM_SINGLE("AudioL2", TWL4030_REG_EAR_CTL, 2, 1, 0),
323 {0x0, 0x1, 0x2, 0x4}; 340 SOC_DAPM_SINGLE("AudioR1", TWL4030_REG_EAR_CTL, 3, 1, 0),
324 341};
325static const struct soc_enum twl4030_earpiece_enum =
326 SOC_VALUE_ENUM_SINGLE(TWL4030_REG_EAR_CTL, 1, 0x7,
327 ARRAY_SIZE(twl4030_earpiece_texts),
328 twl4030_earpiece_texts,
329 twl4030_earpiece_values);
330
331static const struct snd_kcontrol_new twl4030_dapm_earpiece_control =
332SOC_DAPM_VALUE_ENUM("Route", twl4030_earpiece_enum);
333 342
334/* PreDrive Left */ 343/* PreDrive Left */
335static const char *twl4030_predrivel_texts[] = 344static const struct snd_kcontrol_new twl4030_dapm_predrivel_controls[] = {
336 {"Off", "DACL1", "DACL2", "DACR2"}; 345 SOC_DAPM_SINGLE("Voice", TWL4030_REG_PREDL_CTL, 0, 1, 0),
337 346 SOC_DAPM_SINGLE("AudioL1", TWL4030_REG_PREDL_CTL, 1, 1, 0),
338static const unsigned int twl4030_predrivel_values[] = 347 SOC_DAPM_SINGLE("AudioL2", TWL4030_REG_PREDL_CTL, 2, 1, 0),
339 {0x0, 0x1, 0x2, 0x4}; 348 SOC_DAPM_SINGLE("AudioR2", TWL4030_REG_PREDL_CTL, 3, 1, 0),
340 349};
341static const struct soc_enum twl4030_predrivel_enum =
342 SOC_VALUE_ENUM_SINGLE(TWL4030_REG_PREDL_CTL, 1, 0x7,
343 ARRAY_SIZE(twl4030_predrivel_texts),
344 twl4030_predrivel_texts,
345 twl4030_predrivel_values);
346
347static const struct snd_kcontrol_new twl4030_dapm_predrivel_control =
348SOC_DAPM_VALUE_ENUM("Route", twl4030_predrivel_enum);
349 350
350/* PreDrive Right */ 351/* PreDrive Right */
351static const char *twl4030_predriver_texts[] = 352static const struct snd_kcontrol_new twl4030_dapm_predriver_controls[] = {
352 {"Off", "DACR1", "DACR2", "DACL2"}; 353 SOC_DAPM_SINGLE("Voice", TWL4030_REG_PREDR_CTL, 0, 1, 0),
353 354 SOC_DAPM_SINGLE("AudioR1", TWL4030_REG_PREDR_CTL, 1, 1, 0),
354static const unsigned int twl4030_predriver_values[] = 355 SOC_DAPM_SINGLE("AudioR2", TWL4030_REG_PREDR_CTL, 2, 1, 0),
355 {0x0, 0x1, 0x2, 0x4}; 356 SOC_DAPM_SINGLE("AudioL2", TWL4030_REG_PREDR_CTL, 3, 1, 0),
356 357};
357static const struct soc_enum twl4030_predriver_enum =
358 SOC_VALUE_ENUM_SINGLE(TWL4030_REG_PREDR_CTL, 1, 0x7,
359 ARRAY_SIZE(twl4030_predriver_texts),
360 twl4030_predriver_texts,
361 twl4030_predriver_values);
362
363static const struct snd_kcontrol_new twl4030_dapm_predriver_control =
364SOC_DAPM_VALUE_ENUM("Route", twl4030_predriver_enum);
365 358
366/* Headset Left */ 359/* Headset Left */
367static const char *twl4030_hsol_texts[] = 360static const struct snd_kcontrol_new twl4030_dapm_hsol_controls[] = {
368 {"Off", "DACL1", "DACL2"}; 361 SOC_DAPM_SINGLE("Voice", TWL4030_REG_HS_SEL, 0, 1, 0),
369 362 SOC_DAPM_SINGLE("AudioL1", TWL4030_REG_HS_SEL, 1, 1, 0),
370static const struct soc_enum twl4030_hsol_enum = 363 SOC_DAPM_SINGLE("AudioL2", TWL4030_REG_HS_SEL, 2, 1, 0),
371 SOC_ENUM_SINGLE(TWL4030_REG_HS_SEL, 1, 364};
372 ARRAY_SIZE(twl4030_hsol_texts),
373 twl4030_hsol_texts);
374
375static const struct snd_kcontrol_new twl4030_dapm_hsol_control =
376SOC_DAPM_ENUM("Route", twl4030_hsol_enum);
377 365
378/* Headset Right */ 366/* Headset Right */
379static const char *twl4030_hsor_texts[] = 367static const struct snd_kcontrol_new twl4030_dapm_hsor_controls[] = {
380 {"Off", "DACR1", "DACR2"}; 368 SOC_DAPM_SINGLE("Voice", TWL4030_REG_HS_SEL, 3, 1, 0),
381 369 SOC_DAPM_SINGLE("AudioR1", TWL4030_REG_HS_SEL, 4, 1, 0),
382static const struct soc_enum twl4030_hsor_enum = 370 SOC_DAPM_SINGLE("AudioR2", TWL4030_REG_HS_SEL, 5, 1, 0),
383 SOC_ENUM_SINGLE(TWL4030_REG_HS_SEL, 4, 371};
384 ARRAY_SIZE(twl4030_hsor_texts),
385 twl4030_hsor_texts);
386
387static const struct snd_kcontrol_new twl4030_dapm_hsor_control =
388SOC_DAPM_ENUM("Route", twl4030_hsor_enum);
389 372
390/* Carkit Left */ 373/* Carkit Left */
391static const char *twl4030_carkitl_texts[] = 374static const struct snd_kcontrol_new twl4030_dapm_carkitl_controls[] = {
392 {"Off", "DACL1", "DACL2"}; 375 SOC_DAPM_SINGLE("Voice", TWL4030_REG_PRECKL_CTL, 0, 1, 0),
393 376 SOC_DAPM_SINGLE("AudioL1", TWL4030_REG_PRECKL_CTL, 1, 1, 0),
394static const struct soc_enum twl4030_carkitl_enum = 377 SOC_DAPM_SINGLE("AudioL2", TWL4030_REG_PRECKL_CTL, 2, 1, 0),
395 SOC_ENUM_SINGLE(TWL4030_REG_PRECKL_CTL, 1, 378};
396 ARRAY_SIZE(twl4030_carkitl_texts),
397 twl4030_carkitl_texts);
398
399static const struct snd_kcontrol_new twl4030_dapm_carkitl_control =
400SOC_DAPM_ENUM("Route", twl4030_carkitl_enum);
401 379
402/* Carkit Right */ 380/* Carkit Right */
403static const char *twl4030_carkitr_texts[] = 381static const struct snd_kcontrol_new twl4030_dapm_carkitr_controls[] = {
404 {"Off", "DACR1", "DACR2"}; 382 SOC_DAPM_SINGLE("Voice", TWL4030_REG_PRECKR_CTL, 0, 1, 0),
405 383 SOC_DAPM_SINGLE("AudioR1", TWL4030_REG_PRECKR_CTL, 1, 1, 0),
406static const struct soc_enum twl4030_carkitr_enum = 384 SOC_DAPM_SINGLE("AudioR2", TWL4030_REG_PRECKR_CTL, 2, 1, 0),
407 SOC_ENUM_SINGLE(TWL4030_REG_PRECKR_CTL, 1, 385};
408 ARRAY_SIZE(twl4030_carkitr_texts),
409 twl4030_carkitr_texts);
410
411static const struct snd_kcontrol_new twl4030_dapm_carkitr_control =
412SOC_DAPM_ENUM("Route", twl4030_carkitr_enum);
413 386
414/* Handsfree Left */ 387/* Handsfree Left */
415static const char *twl4030_handsfreel_texts[] = 388static const char *twl4030_handsfreel_texts[] =
416 {"Voice", "DACL1", "DACL2", "DACR2"}; 389 {"Voice", "AudioL1", "AudioL2", "AudioR2"};
417 390
418static const struct soc_enum twl4030_handsfreel_enum = 391static const struct soc_enum twl4030_handsfreel_enum =
419 SOC_ENUM_SINGLE(TWL4030_REG_HFL_CTL, 0, 392 SOC_ENUM_SINGLE(TWL4030_REG_HFL_CTL, 0,
@@ -423,9 +396,13 @@ static const struct soc_enum twl4030_handsfreel_enum =
423static const struct snd_kcontrol_new twl4030_dapm_handsfreel_control = 396static const struct snd_kcontrol_new twl4030_dapm_handsfreel_control =
424SOC_DAPM_ENUM("Route", twl4030_handsfreel_enum); 397SOC_DAPM_ENUM("Route", twl4030_handsfreel_enum);
425 398
399/* Handsfree Left virtual mute */
400static const struct snd_kcontrol_new twl4030_dapm_handsfreelmute_control =
401 SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 0, 1, 0);
402
426/* Handsfree Right */ 403/* Handsfree Right */
427static const char *twl4030_handsfreer_texts[] = 404static const char *twl4030_handsfreer_texts[] =
428 {"Voice", "DACR1", "DACR2", "DACL2"}; 405 {"Voice", "AudioR1", "AudioR2", "AudioL2"};
429 406
430static const struct soc_enum twl4030_handsfreer_enum = 407static const struct soc_enum twl4030_handsfreer_enum =
431 SOC_ENUM_SINGLE(TWL4030_REG_HFR_CTL, 0, 408 SOC_ENUM_SINGLE(TWL4030_REG_HFR_CTL, 0,
@@ -435,37 +412,48 @@ static const struct soc_enum twl4030_handsfreer_enum =
435static const struct snd_kcontrol_new twl4030_dapm_handsfreer_control = 412static const struct snd_kcontrol_new twl4030_dapm_handsfreer_control =
436SOC_DAPM_ENUM("Route", twl4030_handsfreer_enum); 413SOC_DAPM_ENUM("Route", twl4030_handsfreer_enum);
437 414
438/* Left analog microphone selection */ 415/* Handsfree Right virtual mute */
439static const char *twl4030_analoglmic_texts[] = 416static const struct snd_kcontrol_new twl4030_dapm_handsfreermute_control =
440 {"Off", "Main mic", "Headset mic", "AUXL", "Carkit mic"}; 417 SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 1, 1, 0);
441 418
442static const unsigned int twl4030_analoglmic_values[] = 419/* Vibra */
443 {0x0, 0x1, 0x2, 0x4, 0x8}; 420/* Vibra audio path selection */
421static const char *twl4030_vibra_texts[] =
422 {"AudioL1", "AudioR1", "AudioL2", "AudioR2"};
444 423
445static const struct soc_enum twl4030_analoglmic_enum = 424static const struct soc_enum twl4030_vibra_enum =
446 SOC_VALUE_ENUM_SINGLE(TWL4030_REG_ANAMICL, 0, 0xf, 425 SOC_ENUM_SINGLE(TWL4030_REG_VIBRA_CTL, 2,
447 ARRAY_SIZE(twl4030_analoglmic_texts), 426 ARRAY_SIZE(twl4030_vibra_texts),
448 twl4030_analoglmic_texts, 427 twl4030_vibra_texts);
449 twl4030_analoglmic_values);
450 428
451static const struct snd_kcontrol_new twl4030_dapm_analoglmic_control = 429static const struct snd_kcontrol_new twl4030_dapm_vibra_control =
452SOC_DAPM_VALUE_ENUM("Route", twl4030_analoglmic_enum); 430SOC_DAPM_ENUM("Route", twl4030_vibra_enum);
453 431
454/* Right analog microphone selection */ 432/* Vibra path selection: local vibrator (PWM) or audio driven */
455static const char *twl4030_analogrmic_texts[] = 433static const char *twl4030_vibrapath_texts[] =
456 {"Off", "Sub mic", "AUXR"}; 434 {"Local vibrator", "Audio"};
457 435
458static const unsigned int twl4030_analogrmic_values[] = 436static const struct soc_enum twl4030_vibrapath_enum =
459 {0x0, 0x1, 0x4}; 437 SOC_ENUM_SINGLE(TWL4030_REG_VIBRA_CTL, 4,
438 ARRAY_SIZE(twl4030_vibrapath_texts),
439 twl4030_vibrapath_texts);
460 440
461static const struct soc_enum twl4030_analogrmic_enum = 441static const struct snd_kcontrol_new twl4030_dapm_vibrapath_control =
462 SOC_VALUE_ENUM_SINGLE(TWL4030_REG_ANAMICR, 0, 0x5, 442SOC_DAPM_ENUM("Route", twl4030_vibrapath_enum);
463 ARRAY_SIZE(twl4030_analogrmic_texts),
464 twl4030_analogrmic_texts,
465 twl4030_analogrmic_values);
466 443
467static const struct snd_kcontrol_new twl4030_dapm_analogrmic_control = 444/* Left analog microphone selection */
468SOC_DAPM_VALUE_ENUM("Route", twl4030_analogrmic_enum); 445static const struct snd_kcontrol_new twl4030_dapm_analoglmic_controls[] = {
446 SOC_DAPM_SINGLE("Main mic", TWL4030_REG_ANAMICL, 0, 1, 0),
447 SOC_DAPM_SINGLE("Headset mic", TWL4030_REG_ANAMICL, 1, 1, 0),
448 SOC_DAPM_SINGLE("AUXL", TWL4030_REG_ANAMICL, 2, 1, 0),
449 SOC_DAPM_SINGLE("Carkit mic", TWL4030_REG_ANAMICL, 3, 1, 0),
450};
451
452/* Right analog microphone selection */
453static const struct snd_kcontrol_new twl4030_dapm_analogrmic_controls[] = {
454 SOC_DAPM_SINGLE("Sub mic", TWL4030_REG_ANAMICR, 0, 1, 0),
455 SOC_DAPM_SINGLE("AUXR", TWL4030_REG_ANAMICR, 2, 1, 0),
456};
469 457
470/* TX1 L/R Analog/Digital microphone selection */ 458/* TX1 L/R Analog/Digital microphone selection */
471static const char *twl4030_micpathtx1_texts[] = 459static const char *twl4030_micpathtx1_texts[] =
@@ -507,6 +495,10 @@ static const struct snd_kcontrol_new twl4030_dapm_abypassr2_control =
507static const struct snd_kcontrol_new twl4030_dapm_abypassl2_control = 495static const struct snd_kcontrol_new twl4030_dapm_abypassl2_control =
508 SOC_DAPM_SINGLE("Switch", TWL4030_REG_ARXL2_APGA_CTL, 2, 1, 0); 496 SOC_DAPM_SINGLE("Switch", TWL4030_REG_ARXL2_APGA_CTL, 2, 1, 0);
509 497
498/* Analog bypass for Voice */
499static const struct snd_kcontrol_new twl4030_dapm_abypassv_control =
500 SOC_DAPM_SINGLE("Switch", TWL4030_REG_VDL_APGA_CTL, 2, 1, 0);
501
510/* Digital bypass gain, 0 mutes the bypass */ 502/* Digital bypass gain, 0 mutes the bypass */
511static const unsigned int twl4030_dapm_dbypass_tlv[] = { 503static const unsigned int twl4030_dapm_dbypass_tlv[] = {
512 TLV_DB_RANGE_HEAD(2), 504 TLV_DB_RANGE_HEAD(2),
@@ -526,6 +518,18 @@ static const struct snd_kcontrol_new twl4030_dapm_dbypassr_control =
526 TWL4030_REG_ATX2ARXPGA, 0, 7, 0, 518 TWL4030_REG_ATX2ARXPGA, 0, 7, 0,
527 twl4030_dapm_dbypass_tlv); 519 twl4030_dapm_dbypass_tlv);
528 520
521/*
522 * Voice Sidetone GAIN volume control:
523 * from -51 to -10 dB in 1 dB steps (mute instead of -51 dB)
524 */
525static DECLARE_TLV_DB_SCALE(twl4030_dapm_dbypassv_tlv, -5100, 100, 1);
526
527/* Digital bypass voice: sidetone (VUL -> VDL)*/
528static const struct snd_kcontrol_new twl4030_dapm_dbypassv_control =
529 SOC_DAPM_SINGLE_TLV("Volume",
530 TWL4030_REG_VSTPGA, 0, 0x29, 0,
531 twl4030_dapm_dbypassv_tlv);
532
529static int micpath_event(struct snd_soc_dapm_widget *w, 533static int micpath_event(struct snd_soc_dapm_widget *w,
530 struct snd_kcontrol *kcontrol, int event) 534 struct snd_kcontrol *kcontrol, int event)
531{ 535{
@@ -556,63 +560,143 @@ static int micpath_event(struct snd_soc_dapm_widget *w,
556 return 0; 560 return 0;
557} 561}
558 562
559static int handsfree_event(struct snd_soc_dapm_widget *w, 563static void handsfree_ramp(struct snd_soc_codec *codec, int reg, int ramp)
560 struct snd_kcontrol *kcontrol, int event)
561{ 564{
562 struct soc_enum *e = (struct soc_enum *)w->kcontrols->private_value;
563 unsigned char hs_ctl; 565 unsigned char hs_ctl;
564 566
565 hs_ctl = twl4030_read_reg_cache(w->codec, e->reg); 567 hs_ctl = twl4030_read_reg_cache(codec, reg);
566 568
567 if (hs_ctl & TWL4030_HF_CTL_REF_EN) { 569 if (ramp) {
570 /* HF ramp-up */
571 hs_ctl |= TWL4030_HF_CTL_REF_EN;
572 twl4030_write(codec, reg, hs_ctl);
573 udelay(10);
568 hs_ctl |= TWL4030_HF_CTL_RAMP_EN; 574 hs_ctl |= TWL4030_HF_CTL_RAMP_EN;
569 twl4030_write(w->codec, e->reg, hs_ctl); 575 twl4030_write(codec, reg, hs_ctl);
576 udelay(40);
570 hs_ctl |= TWL4030_HF_CTL_LOOP_EN; 577 hs_ctl |= TWL4030_HF_CTL_LOOP_EN;
571 twl4030_write(w->codec, e->reg, hs_ctl);
572 hs_ctl |= TWL4030_HF_CTL_HB_EN; 578 hs_ctl |= TWL4030_HF_CTL_HB_EN;
573 twl4030_write(w->codec, e->reg, hs_ctl); 579 twl4030_write(codec, reg, hs_ctl);
574 } else { 580 } else {
575 hs_ctl &= ~(TWL4030_HF_CTL_RAMP_EN | TWL4030_HF_CTL_LOOP_EN 581 /* HF ramp-down */
576 | TWL4030_HF_CTL_HB_EN); 582 hs_ctl &= ~TWL4030_HF_CTL_LOOP_EN;
577 twl4030_write(w->codec, e->reg, hs_ctl); 583 hs_ctl &= ~TWL4030_HF_CTL_HB_EN;
584 twl4030_write(codec, reg, hs_ctl);
585 hs_ctl &= ~TWL4030_HF_CTL_RAMP_EN;
586 twl4030_write(codec, reg, hs_ctl);
587 udelay(40);
588 hs_ctl &= ~TWL4030_HF_CTL_REF_EN;
589 twl4030_write(codec, reg, hs_ctl);
578 } 590 }
591}
579 592
593static int handsfreelpga_event(struct snd_soc_dapm_widget *w,
594 struct snd_kcontrol *kcontrol, int event)
595{
596 switch (event) {
597 case SND_SOC_DAPM_POST_PMU:
598 handsfree_ramp(w->codec, TWL4030_REG_HFL_CTL, 1);
599 break;
600 case SND_SOC_DAPM_POST_PMD:
601 handsfree_ramp(w->codec, TWL4030_REG_HFL_CTL, 0);
602 break;
603 }
580 return 0; 604 return 0;
581} 605}
582 606
583static int headsetl_event(struct snd_soc_dapm_widget *w, 607static int handsfreerpga_event(struct snd_soc_dapm_widget *w,
584 struct snd_kcontrol *kcontrol, int event) 608 struct snd_kcontrol *kcontrol, int event)
585{ 609{
610 switch (event) {
611 case SND_SOC_DAPM_POST_PMU:
612 handsfree_ramp(w->codec, TWL4030_REG_HFR_CTL, 1);
613 break;
614 case SND_SOC_DAPM_POST_PMD:
615 handsfree_ramp(w->codec, TWL4030_REG_HFR_CTL, 0);
616 break;
617 }
618 return 0;
619}
620
621static void headset_ramp(struct snd_soc_codec *codec, int ramp)
622{
586 unsigned char hs_gain, hs_pop; 623 unsigned char hs_gain, hs_pop;
624 struct twl4030_priv *twl4030 = codec->private_data;
625 /* Base values for ramp delay calculation: 2^19 - 2^26 */
626 unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
627 8388608, 16777216, 33554432, 67108864};
587 628
588 /* Save the current volume */ 629 hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
589 hs_gain = twl4030_read_reg_cache(w->codec, TWL4030_REG_HS_GAIN_SET); 630 hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
590 hs_pop = twl4030_read_reg_cache(w->codec, TWL4030_REG_HS_POPN_SET);
591 631
592 switch (event) { 632 if (ramp) {
593 case SND_SOC_DAPM_POST_PMU: 633 /* Headset ramp-up according to the TRM */
594 /* Do the anti-pop/bias ramp enable according to the TRM */
595 hs_pop |= TWL4030_VMID_EN; 634 hs_pop |= TWL4030_VMID_EN;
596 twl4030_write(w->codec, TWL4030_REG_HS_POPN_SET, hs_pop); 635 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
597 /* Is this needed? Can we just use whatever gain here? */ 636 twl4030_write(codec, TWL4030_REG_HS_GAIN_SET, hs_gain);
598 twl4030_write(w->codec, TWL4030_REG_HS_GAIN_SET,
599 (hs_gain & (~0x0f)) | 0x0a);
600 hs_pop |= TWL4030_RAMP_EN; 637 hs_pop |= TWL4030_RAMP_EN;
601 twl4030_write(w->codec, TWL4030_REG_HS_POPN_SET, hs_pop); 638 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
602 639 } else {
603 /* Restore the original volume */ 640 /* Headset ramp-down _not_ according to
604 twl4030_write(w->codec, TWL4030_REG_HS_GAIN_SET, hs_gain); 641 * the TRM, but in a way that it is working */
605 break;
606 case SND_SOC_DAPM_POST_PMD:
607 /* Do the anti-pop/bias ramp disable according to the TRM */
608 hs_pop &= ~TWL4030_RAMP_EN; 642 hs_pop &= ~TWL4030_RAMP_EN;
609 twl4030_write(w->codec, TWL4030_REG_HS_POPN_SET, hs_pop); 643 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
644 /* Wait ramp delay time + 1, so the VMID can settle */
645 mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
646 twl4030->sysclk) + 1);
610 /* Bypass the reg_cache to mute the headset */ 647 /* Bypass the reg_cache to mute the headset */
611 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 648 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
612 hs_gain & (~0x0f), 649 hs_gain & (~0x0f),
613 TWL4030_REG_HS_GAIN_SET); 650 TWL4030_REG_HS_GAIN_SET);
651
614 hs_pop &= ~TWL4030_VMID_EN; 652 hs_pop &= ~TWL4030_VMID_EN;
615 twl4030_write(w->codec, TWL4030_REG_HS_POPN_SET, hs_pop); 653 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
654 }
655}
656
657static int headsetlpga_event(struct snd_soc_dapm_widget *w,
658 struct snd_kcontrol *kcontrol, int event)
659{
660 struct twl4030_priv *twl4030 = w->codec->private_data;
661
662 switch (event) {
663 case SND_SOC_DAPM_POST_PMU:
664 /* Do the ramp-up only once */
665 if (!twl4030->hsr_enabled)
666 headset_ramp(w->codec, 1);
667
668 twl4030->hsl_enabled = 1;
669 break;
670 case SND_SOC_DAPM_POST_PMD:
671 /* Do the ramp-down only if both headsetL/R is disabled */
672 if (!twl4030->hsr_enabled)
673 headset_ramp(w->codec, 0);
674
675 twl4030->hsl_enabled = 0;
676 break;
677 }
678 return 0;
679}
680
681static int headsetrpga_event(struct snd_soc_dapm_widget *w,
682 struct snd_kcontrol *kcontrol, int event)
683{
684 struct twl4030_priv *twl4030 = w->codec->private_data;
685
686 switch (event) {
687 case SND_SOC_DAPM_POST_PMU:
688 /* Do the ramp-up only once */
689 if (!twl4030->hsl_enabled)
690 headset_ramp(w->codec, 1);
691
692 twl4030->hsr_enabled = 1;
693 break;
694 case SND_SOC_DAPM_POST_PMD:
695 /* Do the ramp-down only if both headsetL/R is disabled */
696 if (!twl4030->hsl_enabled)
697 headset_ramp(w->codec, 0);
698
699 twl4030->hsr_enabled = 0;
616 break; 700 break;
617 } 701 }
618 return 0; 702 return 0;
@@ -624,7 +708,7 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
624 struct soc_mixer_control *m = 708 struct soc_mixer_control *m =
625 (struct soc_mixer_control *)w->kcontrols->private_value; 709 (struct soc_mixer_control *)w->kcontrols->private_value;
626 struct twl4030_priv *twl4030 = w->codec->private_data; 710 struct twl4030_priv *twl4030 = w->codec->private_data;
627 unsigned char reg; 711 unsigned char reg, misc;
628 712
629 reg = twl4030_read_reg_cache(w->codec, m->reg); 713 reg = twl4030_read_reg_cache(w->codec, m->reg);
630 714
@@ -636,14 +720,34 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
636 else 720 else
637 twl4030->bypass_state &= 721 twl4030->bypass_state &=
638 ~(1 << (m->reg - TWL4030_REG_ARXL1_APGA_CTL)); 722 ~(1 << (m->reg - TWL4030_REG_ARXL1_APGA_CTL));
723 } else if (m->reg == TWL4030_REG_VDL_APGA_CTL) {
724 /* Analog voice bypass */
725 if (reg & (1 << m->shift))
726 twl4030->bypass_state |= (1 << 4);
727 else
728 twl4030->bypass_state &= ~(1 << 4);
729 } else if (m->reg == TWL4030_REG_VSTPGA) {
730 /* Voice digital bypass */
731 if (reg)
732 twl4030->bypass_state |= (1 << 5);
733 else
734 twl4030->bypass_state &= ~(1 << 5);
639 } else { 735 } else {
640 /* Digital bypass */ 736 /* Digital bypass */
641 if (reg & (0x7 << m->shift)) 737 if (reg & (0x7 << m->shift))
642 twl4030->bypass_state |= (1 << (m->shift ? 5 : 4)); 738 twl4030->bypass_state |= (1 << (m->shift ? 7 : 6));
643 else 739 else
644 twl4030->bypass_state &= ~(1 << (m->shift ? 5 : 4)); 740 twl4030->bypass_state &= ~(1 << (m->shift ? 7 : 6));
645 } 741 }
646 742
743 /* Enable master analog loopback mode if any analog switch is enabled*/
744 misc = twl4030_read_reg_cache(w->codec, TWL4030_REG_MISC_SET_1);
745 if (twl4030->bypass_state & 0x1F)
746 misc |= TWL4030_FMLOOP_EN;
747 else
748 misc &= ~TWL4030_FMLOOP_EN;
749 twl4030_write(w->codec, TWL4030_REG_MISC_SET_1, misc);
750
647 if (w->codec->bias_level == SND_SOC_BIAS_STANDBY) { 751 if (w->codec->bias_level == SND_SOC_BIAS_STANDBY) {
648 if (twl4030->bypass_state) 752 if (twl4030->bypass_state)
649 twl4030_codec_mute(w->codec, 0); 753 twl4030_codec_mute(w->codec, 0);
@@ -810,6 +914,48 @@ static int snd_soc_put_volsw_r2_twl4030(struct snd_kcontrol *kcontrol,
810 return err; 914 return err;
811} 915}
812 916
917/* Codec operation modes */
918static const char *twl4030_op_modes_texts[] = {
919 "Option 2 (voice/audio)", "Option 1 (audio)"
920};
921
922static const struct soc_enum twl4030_op_modes_enum =
923 SOC_ENUM_SINGLE(TWL4030_REG_CODEC_MODE, 0,
924 ARRAY_SIZE(twl4030_op_modes_texts),
925 twl4030_op_modes_texts);
926
927int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
928 struct snd_ctl_elem_value *ucontrol)
929{
930 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
931 struct twl4030_priv *twl4030 = codec->private_data;
932 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
933 unsigned short val;
934 unsigned short mask, bitmask;
935
936 if (twl4030->configured) {
937 printk(KERN_ERR "twl4030 operation mode cannot be "
938 "changed on-the-fly\n");
939 return -EBUSY;
940 }
941
942 for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
943 ;
944 if (ucontrol->value.enumerated.item[0] > e->max - 1)
945 return -EINVAL;
946
947 val = ucontrol->value.enumerated.item[0] << e->shift_l;
948 mask = (bitmask - 1) << e->shift_l;
949 if (e->shift_l != e->shift_r) {
950 if (ucontrol->value.enumerated.item[1] > e->max - 1)
951 return -EINVAL;
952 val |= ucontrol->value.enumerated.item[1] << e->shift_r;
953 mask |= (bitmask - 1) << e->shift_r;
954 }
955
956 return snd_soc_update_bits(codec, e->reg, mask, val);
957}
958
813/* 959/*
814 * FGAIN volume control: 960 * FGAIN volume control:
815 * from -62 to 0 dB in 1 dB steps (mute instead of -63 dB) 961 * from -62 to 0 dB in 1 dB steps (mute instead of -63 dB)
@@ -824,6 +970,12 @@ static DECLARE_TLV_DB_SCALE(digital_fine_tlv, -6300, 100, 1);
824static DECLARE_TLV_DB_SCALE(digital_coarse_tlv, 0, 600, 0); 970static DECLARE_TLV_DB_SCALE(digital_coarse_tlv, 0, 600, 0);
825 971
826/* 972/*
973 * Voice Downlink GAIN volume control:
974 * from -37 to 12 dB in 1 dB steps (mute instead of -37 dB)
975 */
976static DECLARE_TLV_DB_SCALE(digital_voice_downlink_tlv, -3700, 100, 1);
977
978/*
827 * Analog playback gain 979 * Analog playback gain
828 * -24 dB to 12 dB in 2 dB steps 980 * -24 dB to 12 dB in 2 dB steps
829 */ 981 */
@@ -864,7 +1016,32 @@ static const struct soc_enum twl4030_rampdelay_enum =
864 ARRAY_SIZE(twl4030_rampdelay_texts), 1016 ARRAY_SIZE(twl4030_rampdelay_texts),
865 twl4030_rampdelay_texts); 1017 twl4030_rampdelay_texts);
866 1018
1019/* Vibra H-bridge direction mode */
1020static const char *twl4030_vibradirmode_texts[] = {
1021 "Vibra H-bridge direction", "Audio data MSB",
1022};
1023
1024static const struct soc_enum twl4030_vibradirmode_enum =
1025 SOC_ENUM_SINGLE(TWL4030_REG_VIBRA_CTL, 5,
1026 ARRAY_SIZE(twl4030_vibradirmode_texts),
1027 twl4030_vibradirmode_texts);
1028
1029/* Vibra H-bridge direction */
1030static const char *twl4030_vibradir_texts[] = {
1031 "Positive polarity", "Negative polarity",
1032};
1033
1034static const struct soc_enum twl4030_vibradir_enum =
1035 SOC_ENUM_SINGLE(TWL4030_REG_VIBRA_CTL, 1,
1036 ARRAY_SIZE(twl4030_vibradir_texts),
1037 twl4030_vibradir_texts);
1038
867static const struct snd_kcontrol_new twl4030_snd_controls[] = { 1039static const struct snd_kcontrol_new twl4030_snd_controls[] = {
1040 /* Codec operation mode control */
1041 SOC_ENUM_EXT("Codec Operation Mode", twl4030_op_modes_enum,
1042 snd_soc_get_enum_double,
1043 snd_soc_put_twl4030_opmode_enum_double),
1044
868 /* Common playback gain controls */ 1045 /* Common playback gain controls */
869 SOC_DOUBLE_R_TLV("DAC1 Digital Fine Playback Volume", 1046 SOC_DOUBLE_R_TLV("DAC1 Digital Fine Playback Volume",
870 TWL4030_REG_ARXL1PGA, TWL4030_REG_ARXR1PGA, 1047 TWL4030_REG_ARXL1PGA, TWL4030_REG_ARXR1PGA,
@@ -893,6 +1070,16 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
893 TWL4030_REG_ARXL2_APGA_CTL, TWL4030_REG_ARXR2_APGA_CTL, 1070 TWL4030_REG_ARXL2_APGA_CTL, TWL4030_REG_ARXR2_APGA_CTL,
894 1, 1, 0), 1071 1, 1, 0),
895 1072
1073 /* Common voice downlink gain controls */
1074 SOC_SINGLE_TLV("DAC Voice Digital Downlink Volume",
1075 TWL4030_REG_VRXPGA, 0, 0x31, 0, digital_voice_downlink_tlv),
1076
1077 SOC_SINGLE_TLV("DAC Voice Analog Downlink Volume",
1078 TWL4030_REG_VDL_APGA_CTL, 3, 0x12, 1, analog_tlv),
1079
1080 SOC_SINGLE("DAC Voice Analog Downlink Switch",
1081 TWL4030_REG_VDL_APGA_CTL, 1, 1, 0),
1082
896 /* Separate output gain controls */ 1083 /* Separate output gain controls */
897 SOC_DOUBLE_R_TLV_TWL4030("PreDriv Playback Volume", 1084 SOC_DOUBLE_R_TLV_TWL4030("PreDriv Playback Volume",
898 TWL4030_REG_PREDL_CTL, TWL4030_REG_PREDR_CTL, 1085 TWL4030_REG_PREDL_CTL, TWL4030_REG_PREDR_CTL,
@@ -920,6 +1107,9 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
920 0, 3, 5, 0, input_gain_tlv), 1107 0, 3, 5, 0, input_gain_tlv),
921 1108
922 SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum), 1109 SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum),
1110
1111 SOC_ENUM("Vibra H-bridge mode", twl4030_vibradirmode_enum),
1112 SOC_ENUM("Vibra H-bridge direction", twl4030_vibradir_enum),
923}; 1113};
924 1114
925static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = { 1115static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
@@ -947,26 +1137,19 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
947 SND_SOC_DAPM_OUTPUT("CARKITR"), 1137 SND_SOC_DAPM_OUTPUT("CARKITR"),
948 SND_SOC_DAPM_OUTPUT("HFL"), 1138 SND_SOC_DAPM_OUTPUT("HFL"),
949 SND_SOC_DAPM_OUTPUT("HFR"), 1139 SND_SOC_DAPM_OUTPUT("HFR"),
1140 SND_SOC_DAPM_OUTPUT("VIBRA"),
950 1141
951 /* DACs */ 1142 /* DACs */
952 SND_SOC_DAPM_DAC("DAC Right1", "Right Front Playback", 1143 SND_SOC_DAPM_DAC("DAC Right1", "Right Front HiFi Playback",
953 SND_SOC_NOPM, 0, 0), 1144 SND_SOC_NOPM, 0, 0),
954 SND_SOC_DAPM_DAC("DAC Left1", "Left Front Playback", 1145 SND_SOC_DAPM_DAC("DAC Left1", "Left Front HiFi Playback",
955 SND_SOC_NOPM, 0, 0), 1146 SND_SOC_NOPM, 0, 0),
956 SND_SOC_DAPM_DAC("DAC Right2", "Right Rear Playback", 1147 SND_SOC_DAPM_DAC("DAC Right2", "Right Rear HiFi Playback",
957 SND_SOC_NOPM, 0, 0), 1148 SND_SOC_NOPM, 0, 0),
958 SND_SOC_DAPM_DAC("DAC Left2", "Left Rear Playback", 1149 SND_SOC_DAPM_DAC("DAC Left2", "Left Rear HiFi Playback",
1150 SND_SOC_NOPM, 0, 0),
1151 SND_SOC_DAPM_DAC("DAC Voice", "Voice Playback",
959 SND_SOC_NOPM, 0, 0), 1152 SND_SOC_NOPM, 0, 0),
960
961 /* Analog PGAs */
962 SND_SOC_DAPM_PGA("ARXR1_APGA", TWL4030_REG_ARXR1_APGA_CTL,
963 0, 0, NULL, 0),
964 SND_SOC_DAPM_PGA("ARXL1_APGA", TWL4030_REG_ARXL1_APGA_CTL,
965 0, 0, NULL, 0),
966 SND_SOC_DAPM_PGA("ARXR2_APGA", TWL4030_REG_ARXR2_APGA_CTL,
967 0, 0, NULL, 0),
968 SND_SOC_DAPM_PGA("ARXL2_APGA", TWL4030_REG_ARXL2_APGA_CTL,
969 0, 0, NULL, 0),
970 1153
971 /* Analog bypasses */ 1154 /* Analog bypasses */
972 SND_SOC_DAPM_SWITCH_E("Right1 Analog Loopback", SND_SOC_NOPM, 0, 0, 1155 SND_SOC_DAPM_SWITCH_E("Right1 Analog Loopback", SND_SOC_NOPM, 0, 0,
@@ -981,6 +1164,9 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
981 SND_SOC_DAPM_SWITCH_E("Left2 Analog Loopback", SND_SOC_NOPM, 0, 0, 1164 SND_SOC_DAPM_SWITCH_E("Left2 Analog Loopback", SND_SOC_NOPM, 0, 0,
982 &twl4030_dapm_abypassl2_control, 1165 &twl4030_dapm_abypassl2_control,
983 bypass_event, SND_SOC_DAPM_POST_REG), 1166 bypass_event, SND_SOC_DAPM_POST_REG),
1167 SND_SOC_DAPM_SWITCH_E("Voice Analog Loopback", SND_SOC_NOPM, 0, 0,
1168 &twl4030_dapm_abypassv_control,
1169 bypass_event, SND_SOC_DAPM_POST_REG),
984 1170
985 /* Digital bypasses */ 1171 /* Digital bypasses */
986 SND_SOC_DAPM_SWITCH_E("Left Digital Loopback", SND_SOC_NOPM, 0, 0, 1172 SND_SOC_DAPM_SWITCH_E("Left Digital Loopback", SND_SOC_NOPM, 0, 0,
@@ -989,43 +1175,88 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
989 SND_SOC_DAPM_SWITCH_E("Right Digital Loopback", SND_SOC_NOPM, 0, 0, 1175 SND_SOC_DAPM_SWITCH_E("Right Digital Loopback", SND_SOC_NOPM, 0, 0,
990 &twl4030_dapm_dbypassr_control, bypass_event, 1176 &twl4030_dapm_dbypassr_control, bypass_event,
991 SND_SOC_DAPM_POST_REG), 1177 SND_SOC_DAPM_POST_REG),
1178 SND_SOC_DAPM_SWITCH_E("Voice Digital Loopback", SND_SOC_NOPM, 0, 0,
1179 &twl4030_dapm_dbypassv_control, bypass_event,
1180 SND_SOC_DAPM_POST_REG),
992 1181
993 SND_SOC_DAPM_MIXER("Analog R1 Playback Mixer", TWL4030_REG_AVDAC_CTL, 1182 /* Digital mixers, power control for the physical DACs */
994 0, 0, NULL, 0), 1183 SND_SOC_DAPM_MIXER("Digital R1 Playback Mixer",
995 SND_SOC_DAPM_MIXER("Analog L1 Playback Mixer", TWL4030_REG_AVDAC_CTL, 1184 TWL4030_REG_AVDAC_CTL, 0, 0, NULL, 0),
996 1, 0, NULL, 0), 1185 SND_SOC_DAPM_MIXER("Digital L1 Playback Mixer",
997 SND_SOC_DAPM_MIXER("Analog R2 Playback Mixer", TWL4030_REG_AVDAC_CTL, 1186 TWL4030_REG_AVDAC_CTL, 1, 0, NULL, 0),
998 2, 0, NULL, 0), 1187 SND_SOC_DAPM_MIXER("Digital R2 Playback Mixer",
999 SND_SOC_DAPM_MIXER("Analog L2 Playback Mixer", TWL4030_REG_AVDAC_CTL, 1188 TWL4030_REG_AVDAC_CTL, 2, 0, NULL, 0),
1000 3, 0, NULL, 0), 1189 SND_SOC_DAPM_MIXER("Digital L2 Playback Mixer",
1001 1190 TWL4030_REG_AVDAC_CTL, 3, 0, NULL, 0),
1002 /* Output MUX controls */ 1191 SND_SOC_DAPM_MIXER("Digital Voice Playback Mixer",
1192 TWL4030_REG_AVDAC_CTL, 4, 0, NULL, 0),
1193
1194 /* Analog mixers, power control for the physical PGAs */
1195 SND_SOC_DAPM_MIXER("Analog R1 Playback Mixer",
1196 TWL4030_REG_ARXR1_APGA_CTL, 0, 0, NULL, 0),
1197 SND_SOC_DAPM_MIXER("Analog L1 Playback Mixer",
1198 TWL4030_REG_ARXL1_APGA_CTL, 0, 0, NULL, 0),
1199 SND_SOC_DAPM_MIXER("Analog R2 Playback Mixer",
1200 TWL4030_REG_ARXR2_APGA_CTL, 0, 0, NULL, 0),
1201 SND_SOC_DAPM_MIXER("Analog L2 Playback Mixer",
1202 TWL4030_REG_ARXL2_APGA_CTL, 0, 0, NULL, 0),
1203 SND_SOC_DAPM_MIXER("Analog Voice Playback Mixer",
1204 TWL4030_REG_VDL_APGA_CTL, 0, 0, NULL, 0),
1205
1206 /* Output MIXER controls */
1003 /* Earpiece */ 1207 /* Earpiece */
1004 SND_SOC_DAPM_VALUE_MUX("Earpiece Mux", SND_SOC_NOPM, 0, 0, 1208 SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
1005 &twl4030_dapm_earpiece_control), 1209 &twl4030_dapm_earpiece_controls[0],
1210 ARRAY_SIZE(twl4030_dapm_earpiece_controls)),
1006 /* PreDrivL/R */ 1211 /* PreDrivL/R */
1007 SND_SOC_DAPM_VALUE_MUX("PredriveL Mux", SND_SOC_NOPM, 0, 0, 1212 SND_SOC_DAPM_MIXER("PredriveL Mixer", SND_SOC_NOPM, 0, 0,
1008 &twl4030_dapm_predrivel_control), 1213 &twl4030_dapm_predrivel_controls[0],
1009 SND_SOC_DAPM_VALUE_MUX("PredriveR Mux", SND_SOC_NOPM, 0, 0, 1214 ARRAY_SIZE(twl4030_dapm_predrivel_controls)),
1010 &twl4030_dapm_predriver_control), 1215 SND_SOC_DAPM_MIXER("PredriveR Mixer", SND_SOC_NOPM, 0, 0,
1216 &twl4030_dapm_predriver_controls[0],
1217 ARRAY_SIZE(twl4030_dapm_predriver_controls)),
1011 /* HeadsetL/R */ 1218 /* HeadsetL/R */
1012 SND_SOC_DAPM_MUX_E("HeadsetL Mux", SND_SOC_NOPM, 0, 0, 1219 SND_SOC_DAPM_MIXER("HeadsetL Mixer", SND_SOC_NOPM, 0, 0,
1013 &twl4030_dapm_hsol_control, headsetl_event, 1220 &twl4030_dapm_hsol_controls[0],
1014 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), 1221 ARRAY_SIZE(twl4030_dapm_hsol_controls)),
1015 SND_SOC_DAPM_MUX("HeadsetR Mux", SND_SOC_NOPM, 0, 0, 1222 SND_SOC_DAPM_PGA_E("HeadsetL PGA", SND_SOC_NOPM,
1016 &twl4030_dapm_hsor_control), 1223 0, 0, NULL, 0, headsetlpga_event,
1224 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1225 SND_SOC_DAPM_MIXER("HeadsetR Mixer", SND_SOC_NOPM, 0, 0,
1226 &twl4030_dapm_hsor_controls[0],
1227 ARRAY_SIZE(twl4030_dapm_hsor_controls)),
1228 SND_SOC_DAPM_PGA_E("HeadsetR PGA", SND_SOC_NOPM,
1229 0, 0, NULL, 0, headsetrpga_event,
1230 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1017 /* CarkitL/R */ 1231 /* CarkitL/R */
1018 SND_SOC_DAPM_MUX("CarkitL Mux", SND_SOC_NOPM, 0, 0, 1232 SND_SOC_DAPM_MIXER("CarkitL Mixer", SND_SOC_NOPM, 0, 0,
1019 &twl4030_dapm_carkitl_control), 1233 &twl4030_dapm_carkitl_controls[0],
1020 SND_SOC_DAPM_MUX("CarkitR Mux", SND_SOC_NOPM, 0, 0, 1234 ARRAY_SIZE(twl4030_dapm_carkitl_controls)),
1021 &twl4030_dapm_carkitr_control), 1235 SND_SOC_DAPM_MIXER("CarkitR Mixer", SND_SOC_NOPM, 0, 0,
1236 &twl4030_dapm_carkitr_controls[0],
1237 ARRAY_SIZE(twl4030_dapm_carkitr_controls)),
1238
1239 /* Output MUX controls */
1022 /* HandsfreeL/R */ 1240 /* HandsfreeL/R */
1023 SND_SOC_DAPM_MUX_E("HandsfreeL Mux", TWL4030_REG_HFL_CTL, 5, 0, 1241 SND_SOC_DAPM_MUX("HandsfreeL Mux", SND_SOC_NOPM, 0, 0,
1024 &twl4030_dapm_handsfreel_control, handsfree_event, 1242 &twl4030_dapm_handsfreel_control),
1025 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), 1243 SND_SOC_DAPM_SWITCH("HandsfreeL Switch", SND_SOC_NOPM, 0, 0,
1026 SND_SOC_DAPM_MUX_E("HandsfreeR Mux", TWL4030_REG_HFR_CTL, 5, 0, 1244 &twl4030_dapm_handsfreelmute_control),
1027 &twl4030_dapm_handsfreer_control, handsfree_event, 1245 SND_SOC_DAPM_PGA_E("HandsfreeL PGA", SND_SOC_NOPM,
1028 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), 1246 0, 0, NULL, 0, handsfreelpga_event,
1247 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1248 SND_SOC_DAPM_MUX("HandsfreeR Mux", SND_SOC_NOPM, 5, 0,
1249 &twl4030_dapm_handsfreer_control),
1250 SND_SOC_DAPM_SWITCH("HandsfreeR Switch", SND_SOC_NOPM, 0, 0,
1251 &twl4030_dapm_handsfreermute_control),
1252 SND_SOC_DAPM_PGA_E("HandsfreeR PGA", SND_SOC_NOPM,
1253 0, 0, NULL, 0, handsfreerpga_event,
1254 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1255 /* Vibra */
1256 SND_SOC_DAPM_MUX("Vibra Mux", TWL4030_REG_VIBRA_CTL, 0, 0,
1257 &twl4030_dapm_vibra_control),
1258 SND_SOC_DAPM_MUX("Vibra Route", SND_SOC_NOPM, 0, 0,
1259 &twl4030_dapm_vibrapath_control),
1029 1260
1030 /* Introducing four virtual ADC, since TWL4030 have four channel for 1261 /* Introducing four virtual ADC, since TWL4030 have four channel for
1031 capture */ 1262 capture */
@@ -1050,11 +1281,15 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
1050 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD| 1281 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD|
1051 SND_SOC_DAPM_POST_REG), 1282 SND_SOC_DAPM_POST_REG),
1052 1283
1053 /* Analog input muxes with switch for the capture amplifiers */ 1284 /* Analog input mixers for the capture amplifiers */
1054 SND_SOC_DAPM_VALUE_MUX("Analog Left Capture Route", 1285 SND_SOC_DAPM_MIXER("Analog Left Capture Route",
1055 TWL4030_REG_ANAMICL, 4, 0, &twl4030_dapm_analoglmic_control), 1286 TWL4030_REG_ANAMICL, 4, 0,
1056 SND_SOC_DAPM_VALUE_MUX("Analog Right Capture Route", 1287 &twl4030_dapm_analoglmic_controls[0],
1057 TWL4030_REG_ANAMICR, 4, 0, &twl4030_dapm_analogrmic_control), 1288 ARRAY_SIZE(twl4030_dapm_analoglmic_controls)),
1289 SND_SOC_DAPM_MIXER("Analog Right Capture Route",
1290 TWL4030_REG_ANAMICR, 4, 0,
1291 &twl4030_dapm_analogrmic_controls[0],
1292 ARRAY_SIZE(twl4030_dapm_analogrmic_controls)),
1058 1293
1059 SND_SOC_DAPM_PGA("ADC Physical Left", 1294 SND_SOC_DAPM_PGA("ADC Physical Left",
1060 TWL4030_REG_AVADC_CTL, 3, 0, NULL, 0), 1295 TWL4030_REG_AVADC_CTL, 3, 0, NULL, 0),
@@ -1073,62 +1308,86 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
1073}; 1308};
1074 1309
1075static const struct snd_soc_dapm_route intercon[] = { 1310static const struct snd_soc_dapm_route intercon[] = {
1076 {"Analog L1 Playback Mixer", NULL, "DAC Left1"}, 1311 {"Digital L1 Playback Mixer", NULL, "DAC Left1"},
1077 {"Analog R1 Playback Mixer", NULL, "DAC Right1"}, 1312 {"Digital R1 Playback Mixer", NULL, "DAC Right1"},
1078 {"Analog L2 Playback Mixer", NULL, "DAC Left2"}, 1313 {"Digital L2 Playback Mixer", NULL, "DAC Left2"},
1079 {"Analog R2 Playback Mixer", NULL, "DAC Right2"}, 1314 {"Digital R2 Playback Mixer", NULL, "DAC Right2"},
1080 1315 {"Digital Voice Playback Mixer", NULL, "DAC Voice"},
1081 {"ARXL1_APGA", NULL, "Analog L1 Playback Mixer"}, 1316
1082 {"ARXR1_APGA", NULL, "Analog R1 Playback Mixer"}, 1317 {"Analog L1 Playback Mixer", NULL, "Digital L1 Playback Mixer"},
1083 {"ARXL2_APGA", NULL, "Analog L2 Playback Mixer"}, 1318 {"Analog R1 Playback Mixer", NULL, "Digital R1 Playback Mixer"},
1084 {"ARXR2_APGA", NULL, "Analog R2 Playback Mixer"}, 1319 {"Analog L2 Playback Mixer", NULL, "Digital L2 Playback Mixer"},
1320 {"Analog R2 Playback Mixer", NULL, "Digital R2 Playback Mixer"},
1321 {"Analog Voice Playback Mixer", NULL, "Digital Voice Playback Mixer"},
1085 1322
1086 /* Internal playback routings */ 1323 /* Internal playback routings */
1087 /* Earpiece */ 1324 /* Earpiece */
1088 {"Earpiece Mux", "DACL1", "ARXL1_APGA"}, 1325 {"Earpiece Mixer", "Voice", "Analog Voice Playback Mixer"},
1089 {"Earpiece Mux", "DACL2", "ARXL2_APGA"}, 1326 {"Earpiece Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1090 {"Earpiece Mux", "DACR1", "ARXR1_APGA"}, 1327 {"Earpiece Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1328 {"Earpiece Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1091 /* PreDrivL */ 1329 /* PreDrivL */
1092 {"PredriveL Mux", "DACL1", "ARXL1_APGA"}, 1330 {"PredriveL Mixer", "Voice", "Analog Voice Playback Mixer"},
1093 {"PredriveL Mux", "DACL2", "ARXL2_APGA"}, 1331 {"PredriveL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1094 {"PredriveL Mux", "DACR2", "ARXR2_APGA"}, 1332 {"PredriveL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1333 {"PredriveL Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1095 /* PreDrivR */ 1334 /* PreDrivR */
1096 {"PredriveR Mux", "DACR1", "ARXR1_APGA"}, 1335 {"PredriveR Mixer", "Voice", "Analog Voice Playback Mixer"},
1097 {"PredriveR Mux", "DACR2", "ARXR2_APGA"}, 1336 {"PredriveR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1098 {"PredriveR Mux", "DACL2", "ARXL2_APGA"}, 1337 {"PredriveR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1338 {"PredriveR Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1099 /* HeadsetL */ 1339 /* HeadsetL */
1100 {"HeadsetL Mux", "DACL1", "ARXL1_APGA"}, 1340 {"HeadsetL Mixer", "Voice", "Analog Voice Playback Mixer"},
1101 {"HeadsetL Mux", "DACL2", "ARXL2_APGA"}, 1341 {"HeadsetL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1342 {"HeadsetL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1343 {"HeadsetL PGA", NULL, "HeadsetL Mixer"},
1102 /* HeadsetR */ 1344 /* HeadsetR */
1103 {"HeadsetR Mux", "DACR1", "ARXR1_APGA"}, 1345 {"HeadsetR Mixer", "Voice", "Analog Voice Playback Mixer"},
1104 {"HeadsetR Mux", "DACR2", "ARXR2_APGA"}, 1346 {"HeadsetR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1347 {"HeadsetR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1348 {"HeadsetR PGA", NULL, "HeadsetR Mixer"},
1105 /* CarkitL */ 1349 /* CarkitL */
1106 {"CarkitL Mux", "DACL1", "ARXL1_APGA"}, 1350 {"CarkitL Mixer", "Voice", "Analog Voice Playback Mixer"},
1107 {"CarkitL Mux", "DACL2", "ARXL2_APGA"}, 1351 {"CarkitL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1352 {"CarkitL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1108 /* CarkitR */ 1353 /* CarkitR */
1109 {"CarkitR Mux", "DACR1", "ARXR1_APGA"}, 1354 {"CarkitR Mixer", "Voice", "Analog Voice Playback Mixer"},
1110 {"CarkitR Mux", "DACR2", "ARXR2_APGA"}, 1355 {"CarkitR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1356 {"CarkitR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1111 /* HandsfreeL */ 1357 /* HandsfreeL */
1112 {"HandsfreeL Mux", "DACL1", "ARXL1_APGA"}, 1358 {"HandsfreeL Mux", "Voice", "Analog Voice Playback Mixer"},
1113 {"HandsfreeL Mux", "DACL2", "ARXL2_APGA"}, 1359 {"HandsfreeL Mux", "AudioL1", "Analog L1 Playback Mixer"},
1114 {"HandsfreeL Mux", "DACR2", "ARXR2_APGA"}, 1360 {"HandsfreeL Mux", "AudioL2", "Analog L2 Playback Mixer"},
1361 {"HandsfreeL Mux", "AudioR2", "Analog R2 Playback Mixer"},
1362 {"HandsfreeL Switch", "Switch", "HandsfreeL Mux"},
1363 {"HandsfreeL PGA", NULL, "HandsfreeL Switch"},
1115 /* HandsfreeR */ 1364 /* HandsfreeR */
1116 {"HandsfreeR Mux", "DACR1", "ARXR1_APGA"}, 1365 {"HandsfreeR Mux", "Voice", "Analog Voice Playback Mixer"},
1117 {"HandsfreeR Mux", "DACR2", "ARXR2_APGA"}, 1366 {"HandsfreeR Mux", "AudioR1", "Analog R1 Playback Mixer"},
1118 {"HandsfreeR Mux", "DACL2", "ARXL2_APGA"}, 1367 {"HandsfreeR Mux", "AudioR2", "Analog R2 Playback Mixer"},
1368 {"HandsfreeR Mux", "AudioL2", "Analog L2 Playback Mixer"},
1369 {"HandsfreeR Switch", "Switch", "HandsfreeR Mux"},
1370 {"HandsfreeR PGA", NULL, "HandsfreeR Switch"},
1371 /* Vibra */
1372 {"Vibra Mux", "AudioL1", "DAC Left1"},
1373 {"Vibra Mux", "AudioR1", "DAC Right1"},
1374 {"Vibra Mux", "AudioL2", "DAC Left2"},
1375 {"Vibra Mux", "AudioR2", "DAC Right2"},
1119 1376
1120 /* outputs */ 1377 /* outputs */
1121 {"OUTL", NULL, "ARXL2_APGA"}, 1378 {"OUTL", NULL, "Analog L2 Playback Mixer"},
1122 {"OUTR", NULL, "ARXR2_APGA"}, 1379 {"OUTR", NULL, "Analog R2 Playback Mixer"},
1123 {"EARPIECE", NULL, "Earpiece Mux"}, 1380 {"EARPIECE", NULL, "Earpiece Mixer"},
1124 {"PREDRIVEL", NULL, "PredriveL Mux"}, 1381 {"PREDRIVEL", NULL, "PredriveL Mixer"},
1125 {"PREDRIVER", NULL, "PredriveR Mux"}, 1382 {"PREDRIVER", NULL, "PredriveR Mixer"},
1126 {"HSOL", NULL, "HeadsetL Mux"}, 1383 {"HSOL", NULL, "HeadsetL PGA"},
1127 {"HSOR", NULL, "HeadsetR Mux"}, 1384 {"HSOR", NULL, "HeadsetR PGA"},
1128 {"CARKITL", NULL, "CarkitL Mux"}, 1385 {"CARKITL", NULL, "CarkitL Mixer"},
1129 {"CARKITR", NULL, "CarkitR Mux"}, 1386 {"CARKITR", NULL, "CarkitR Mixer"},
1130 {"HFL", NULL, "HandsfreeL Mux"}, 1387 {"HFL", NULL, "HandsfreeL PGA"},
1131 {"HFR", NULL, "HandsfreeR Mux"}, 1388 {"HFR", NULL, "HandsfreeR PGA"},
1389 {"Vibra Route", "Audio", "Vibra Mux"},
1390 {"VIBRA", NULL, "Vibra Route"},
1132 1391
1133 /* Capture path */ 1392 /* Capture path */
1134 {"Analog Left Capture Route", "Main mic", "MAINMIC"}, 1393 {"Analog Left Capture Route", "Main mic", "MAINMIC"},
@@ -1168,18 +1427,22 @@ static const struct snd_soc_dapm_route intercon[] = {
1168 {"Left1 Analog Loopback", "Switch", "Analog Left Capture Route"}, 1427 {"Left1 Analog Loopback", "Switch", "Analog Left Capture Route"},
1169 {"Right2 Analog Loopback", "Switch", "Analog Right Capture Route"}, 1428 {"Right2 Analog Loopback", "Switch", "Analog Right Capture Route"},
1170 {"Left2 Analog Loopback", "Switch", "Analog Left Capture Route"}, 1429 {"Left2 Analog Loopback", "Switch", "Analog Left Capture Route"},
1430 {"Voice Analog Loopback", "Switch", "Analog Left Capture Route"},
1171 1431
1172 {"Analog R1 Playback Mixer", NULL, "Right1 Analog Loopback"}, 1432 {"Analog R1 Playback Mixer", NULL, "Right1 Analog Loopback"},
1173 {"Analog L1 Playback Mixer", NULL, "Left1 Analog Loopback"}, 1433 {"Analog L1 Playback Mixer", NULL, "Left1 Analog Loopback"},
1174 {"Analog R2 Playback Mixer", NULL, "Right2 Analog Loopback"}, 1434 {"Analog R2 Playback Mixer", NULL, "Right2 Analog Loopback"},
1175 {"Analog L2 Playback Mixer", NULL, "Left2 Analog Loopback"}, 1435 {"Analog L2 Playback Mixer", NULL, "Left2 Analog Loopback"},
1436 {"Analog Voice Playback Mixer", NULL, "Voice Analog Loopback"},
1176 1437
1177 /* Digital bypass routes */ 1438 /* Digital bypass routes */
1178 {"Right Digital Loopback", "Volume", "TX1 Capture Route"}, 1439 {"Right Digital Loopback", "Volume", "TX1 Capture Route"},
1179 {"Left Digital Loopback", "Volume", "TX1 Capture Route"}, 1440 {"Left Digital Loopback", "Volume", "TX1 Capture Route"},
1441 {"Voice Digital Loopback", "Volume", "TX2 Capture Route"},
1180 1442
1181 {"Analog R2 Playback Mixer", NULL, "Right Digital Loopback"}, 1443 {"Digital R2 Playback Mixer", NULL, "Right Digital Loopback"},
1182 {"Analog L2 Playback Mixer", NULL, "Left Digital Loopback"}, 1444 {"Digital L2 Playback Mixer", NULL, "Left Digital Loopback"},
1445 {"Digital Voice Playback Mixer", NULL, "Voice Digital Loopback"},
1183 1446
1184}; 1447};
1185 1448
@@ -1226,6 +1489,58 @@ static int twl4030_set_bias_level(struct snd_soc_codec *codec,
1226 return 0; 1489 return 0;
1227} 1490}
1228 1491
1492static void twl4030_constraints(struct twl4030_priv *twl4030,
1493 struct snd_pcm_substream *mst_substream)
1494{
1495 struct snd_pcm_substream *slv_substream;
1496
1497 /* Pick the stream, which need to be constrained */
1498 if (mst_substream == twl4030->master_substream)
1499 slv_substream = twl4030->slave_substream;
1500 else if (mst_substream == twl4030->slave_substream)
1501 slv_substream = twl4030->master_substream;
1502 else /* This should not happen.. */
1503 return;
1504
1505 /* Set the constraints according to the already configured stream */
1506 snd_pcm_hw_constraint_minmax(slv_substream->runtime,
1507 SNDRV_PCM_HW_PARAM_RATE,
1508 twl4030->rate,
1509 twl4030->rate);
1510
1511 snd_pcm_hw_constraint_minmax(slv_substream->runtime,
1512 SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
1513 twl4030->sample_bits,
1514 twl4030->sample_bits);
1515
1516 snd_pcm_hw_constraint_minmax(slv_substream->runtime,
1517 SNDRV_PCM_HW_PARAM_CHANNELS,
1518 twl4030->channels,
1519 twl4030->channels);
1520}
1521
1522/* In case of 4 channel mode, the RX1 L/R for playback and the TX2 L/R for
1523 * capture has to be enabled/disabled. */
1524static void twl4030_tdm_enable(struct snd_soc_codec *codec, int direction,
1525 int enable)
1526{
1527 u8 reg, mask;
1528
1529 reg = twl4030_read_reg_cache(codec, TWL4030_REG_OPTION);
1530
1531 if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1532 mask = TWL4030_ARXL1_VRX_EN | TWL4030_ARXR1_EN;
1533 else
1534 mask = TWL4030_ATXL2_VTXL_EN | TWL4030_ATXR2_VTXR_EN;
1535
1536 if (enable)
1537 reg |= mask;
1538 else
1539 reg &= ~mask;
1540
1541 twl4030_write(codec, TWL4030_REG_OPTION, reg);
1542}
1543
1229static int twl4030_startup(struct snd_pcm_substream *substream, 1544static int twl4030_startup(struct snd_pcm_substream *substream,
1230 struct snd_soc_dai *dai) 1545 struct snd_soc_dai *dai)
1231{ 1546{
@@ -1234,26 +1549,25 @@ static int twl4030_startup(struct snd_pcm_substream *substream,
1234 struct snd_soc_codec *codec = socdev->card->codec; 1549 struct snd_soc_codec *codec = socdev->card->codec;
1235 struct twl4030_priv *twl4030 = codec->private_data; 1550 struct twl4030_priv *twl4030 = codec->private_data;
1236 1551
1237 /* If we already have a playback or capture going then constrain
1238 * this substream to match it.
1239 */
1240 if (twl4030->master_substream) { 1552 if (twl4030->master_substream) {
1241 struct snd_pcm_runtime *master_runtime;
1242 master_runtime = twl4030->master_substream->runtime;
1243
1244 snd_pcm_hw_constraint_minmax(substream->runtime,
1245 SNDRV_PCM_HW_PARAM_RATE,
1246 master_runtime->rate,
1247 master_runtime->rate);
1248
1249 snd_pcm_hw_constraint_minmax(substream->runtime,
1250 SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
1251 master_runtime->sample_bits,
1252 master_runtime->sample_bits);
1253
1254 twl4030->slave_substream = substream; 1553 twl4030->slave_substream = substream;
1255 } else 1554 /* The DAI has one configuration for playback and capture, so
1555 * if the DAI has been already configured then constrain this
1556 * substream to match it. */
1557 if (twl4030->configured)
1558 twl4030_constraints(twl4030, twl4030->master_substream);
1559 } else {
1560 if (!(twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE) &
1561 TWL4030_OPTION_1)) {
1562 /* In option2 4 channel is not supported, set the
1563 * constraint for the first stream for channels, the
1564 * second stream will 'inherit' this cosntraint */
1565 snd_pcm_hw_constraint_minmax(substream->runtime,
1566 SNDRV_PCM_HW_PARAM_CHANNELS,
1567 2, 2);
1568 }
1256 twl4030->master_substream = substream; 1569 twl4030->master_substream = substream;
1570 }
1257 1571
1258 return 0; 1572 return 0;
1259} 1573}
@@ -1270,6 +1584,17 @@ static void twl4030_shutdown(struct snd_pcm_substream *substream,
1270 twl4030->master_substream = twl4030->slave_substream; 1584 twl4030->master_substream = twl4030->slave_substream;
1271 1585
1272 twl4030->slave_substream = NULL; 1586 twl4030->slave_substream = NULL;
1587
1588 /* If all streams are closed, or the remaining stream has not yet
1589 * been configured than set the DAI as not configured. */
1590 if (!twl4030->master_substream)
1591 twl4030->configured = 0;
1592 else if (!twl4030->master_substream->runtime->channels)
1593 twl4030->configured = 0;
1594
1595 /* If the closing substream had 4 channel, do the necessary cleanup */
1596 if (substream->runtime->channels == 4)
1597 twl4030_tdm_enable(codec, substream->stream, 0);
1273} 1598}
1274 1599
1275static int twl4030_hw_params(struct snd_pcm_substream *substream, 1600static int twl4030_hw_params(struct snd_pcm_substream *substream,
@@ -1282,8 +1607,24 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
1282 struct twl4030_priv *twl4030 = codec->private_data; 1607 struct twl4030_priv *twl4030 = codec->private_data;
1283 u8 mode, old_mode, format, old_format; 1608 u8 mode, old_mode, format, old_format;
1284 1609
1285 if (substream == twl4030->slave_substream) 1610 /* If the substream has 4 channel, do the necessary setup */
1286 /* Ignoring hw_params for slave substream */ 1611 if (params_channels(params) == 4) {
1612 u8 format, mode;
1613
1614 format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
1615 mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE);
1616
1617 /* Safety check: are we in the correct operating mode and
1618 * the interface is in TDM mode? */
1619 if ((mode & TWL4030_OPTION_1) &&
1620 ((format & TWL4030_AIF_FORMAT) == TWL4030_AIF_FORMAT_TDM))
1621 twl4030_tdm_enable(codec, substream->stream, 1);
1622 else
1623 return -EINVAL;
1624 }
1625
1626 if (twl4030->configured)
1627 /* Ignoring hw_params for already configured DAI */
1287 return 0; 1628 return 0;
1288 1629
1289 /* bit rate */ 1630 /* bit rate */
@@ -1363,6 +1704,21 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
1363 /* set CODECPDZ afterwards */ 1704 /* set CODECPDZ afterwards */
1364 twl4030_codec_enable(codec, 1); 1705 twl4030_codec_enable(codec, 1);
1365 } 1706 }
1707
1708 /* Store the important parameters for the DAI configuration and set
1709 * the DAI as configured */
1710 twl4030->configured = 1;
1711 twl4030->rate = params_rate(params);
1712 twl4030->sample_bits = hw_param_interval(params,
1713 SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
1714 twl4030->channels = params_channels(params);
1715
1716 /* If both playback and capture streams are open, and one of them
1717 * is setting the hw parameters right now (since we are here), set
1718 * constraints to the other stream to match the current one. */
1719 if (twl4030->slave_substream)
1720 twl4030_constraints(twl4030, substream);
1721
1366 return 0; 1722 return 0;
1367} 1723}
1368 1724
@@ -1370,17 +1726,21 @@ static int twl4030_set_dai_sysclk(struct snd_soc_dai *codec_dai,
1370 int clk_id, unsigned int freq, int dir) 1726 int clk_id, unsigned int freq, int dir)
1371{ 1727{
1372 struct snd_soc_codec *codec = codec_dai->codec; 1728 struct snd_soc_codec *codec = codec_dai->codec;
1729 struct twl4030_priv *twl4030 = codec->private_data;
1373 u8 infreq; 1730 u8 infreq;
1374 1731
1375 switch (freq) { 1732 switch (freq) {
1376 case 19200000: 1733 case 19200000:
1377 infreq = TWL4030_APLL_INFREQ_19200KHZ; 1734 infreq = TWL4030_APLL_INFREQ_19200KHZ;
1735 twl4030->sysclk = 19200;
1378 break; 1736 break;
1379 case 26000000: 1737 case 26000000:
1380 infreq = TWL4030_APLL_INFREQ_26000KHZ; 1738 infreq = TWL4030_APLL_INFREQ_26000KHZ;
1739 twl4030->sysclk = 26000;
1381 break; 1740 break;
1382 case 38400000: 1741 case 38400000:
1383 infreq = TWL4030_APLL_INFREQ_38400KHZ; 1742 infreq = TWL4030_APLL_INFREQ_38400KHZ;
1743 twl4030->sysclk = 38400;
1384 break; 1744 break;
1385 default: 1745 default:
1386 printk(KERN_ERR "TWL4030 set sysclk: unknown rate %d\n", 1746 printk(KERN_ERR "TWL4030 set sysclk: unknown rate %d\n",
@@ -1424,6 +1784,9 @@ static int twl4030_set_dai_fmt(struct snd_soc_dai *codec_dai,
1424 case SND_SOC_DAIFMT_I2S: 1784 case SND_SOC_DAIFMT_I2S:
1425 format |= TWL4030_AIF_FORMAT_CODEC; 1785 format |= TWL4030_AIF_FORMAT_CODEC;
1426 break; 1786 break;
1787 case SND_SOC_DAIFMT_DSP_A:
1788 format |= TWL4030_AIF_FORMAT_TDM;
1789 break;
1427 default: 1790 default:
1428 return -EINVAL; 1791 return -EINVAL;
1429 } 1792 }
@@ -1443,6 +1806,180 @@ static int twl4030_set_dai_fmt(struct snd_soc_dai *codec_dai,
1443 return 0; 1806 return 0;
1444} 1807}
1445 1808
1809/* In case of voice mode, the RX1 L(VRX) for downlink and the TX2 L/R
1810 * (VTXL, VTXR) for uplink has to be enabled/disabled. */
1811static void twl4030_voice_enable(struct snd_soc_codec *codec, int direction,
1812 int enable)
1813{
1814 u8 reg, mask;
1815
1816 reg = twl4030_read_reg_cache(codec, TWL4030_REG_OPTION);
1817
1818 if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1819 mask = TWL4030_ARXL1_VRX_EN;
1820 else
1821 mask = TWL4030_ATXL2_VTXL_EN | TWL4030_ATXR2_VTXR_EN;
1822
1823 if (enable)
1824 reg |= mask;
1825 else
1826 reg &= ~mask;
1827
1828 twl4030_write(codec, TWL4030_REG_OPTION, reg);
1829}
1830
1831static int twl4030_voice_startup(struct snd_pcm_substream *substream,
1832 struct snd_soc_dai *dai)
1833{
1834 struct snd_soc_pcm_runtime *rtd = substream->private_data;
1835 struct snd_soc_device *socdev = rtd->socdev;
1836 struct snd_soc_codec *codec = socdev->card->codec;
1837 u8 infreq;
1838 u8 mode;
1839
1840 /* If the system master clock is not 26MHz, the voice PCM interface is
1841 * not avilable.
1842 */
1843 infreq = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL)
1844 & TWL4030_APLL_INFREQ;
1845
1846 if (infreq != TWL4030_APLL_INFREQ_26000KHZ) {
1847 printk(KERN_ERR "TWL4030 voice startup: "
1848 "MCLK is not 26MHz, call set_sysclk() on init\n");
1849 return -EINVAL;
1850 }
1851
1852 /* If the codec mode is not option2, the voice PCM interface is not
1853 * avilable.
1854 */
1855 mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE)
1856 & TWL4030_OPT_MODE;
1857
1858 if (mode != TWL4030_OPTION_2) {
1859 printk(KERN_ERR "TWL4030 voice startup: "
1860 "the codec mode is not option2\n");
1861 return -EINVAL;
1862 }
1863
1864 return 0;
1865}
1866
1867static void twl4030_voice_shutdown(struct snd_pcm_substream *substream,
1868 struct snd_soc_dai *dai)
1869{
1870 struct snd_soc_pcm_runtime *rtd = substream->private_data;
1871 struct snd_soc_device *socdev = rtd->socdev;
1872 struct snd_soc_codec *codec = socdev->card->codec;
1873
1874 /* Enable voice digital filters */
1875 twl4030_voice_enable(codec, substream->stream, 0);
1876}
1877
1878static int twl4030_voice_hw_params(struct snd_pcm_substream *substream,
1879 struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
1880{
1881 struct snd_soc_pcm_runtime *rtd = substream->private_data;
1882 struct snd_soc_device *socdev = rtd->socdev;
1883 struct snd_soc_codec *codec = socdev->card->codec;
1884 u8 old_mode, mode;
1885
1886 /* Enable voice digital filters */
1887 twl4030_voice_enable(codec, substream->stream, 1);
1888
1889 /* bit rate */
1890 old_mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE)
1891 & ~(TWL4030_CODECPDZ);
1892 mode = old_mode;
1893
1894 switch (params_rate(params)) {
1895 case 8000:
1896 mode &= ~(TWL4030_SEL_16K);
1897 break;
1898 case 16000:
1899 mode |= TWL4030_SEL_16K;
1900 break;
1901 default:
1902 printk(KERN_ERR "TWL4030 voice hw params: unknown rate %d\n",
1903 params_rate(params));
1904 return -EINVAL;
1905 }
1906
1907 if (mode != old_mode) {
1908 /* change rate and set CODECPDZ */
1909 twl4030_codec_enable(codec, 0);
1910 twl4030_write(codec, TWL4030_REG_CODEC_MODE, mode);
1911 twl4030_codec_enable(codec, 1);
1912 }
1913
1914 return 0;
1915}
1916
1917static int twl4030_voice_set_dai_sysclk(struct snd_soc_dai *codec_dai,
1918 int clk_id, unsigned int freq, int dir)
1919{
1920 struct snd_soc_codec *codec = codec_dai->codec;
1921 u8 infreq;
1922
1923 switch (freq) {
1924 case 26000000:
1925 infreq = TWL4030_APLL_INFREQ_26000KHZ;
1926 break;
1927 default:
1928 printk(KERN_ERR "TWL4030 voice set sysclk: unknown rate %d\n",
1929 freq);
1930 return -EINVAL;
1931 }
1932
1933 infreq |= TWL4030_APLL_EN;
1934 twl4030_write(codec, TWL4030_REG_APLL_CTL, infreq);
1935
1936 return 0;
1937}
1938
1939static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
1940 unsigned int fmt)
1941{
1942 struct snd_soc_codec *codec = codec_dai->codec;
1943 u8 old_format, format;
1944
1945 /* get format */
1946 old_format = twl4030_read_reg_cache(codec, TWL4030_REG_VOICE_IF);
1947 format = old_format;
1948
1949 /* set master/slave audio interface */
1950 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
1951 case SND_SOC_DAIFMT_CBS_CFM:
1952 format &= ~(TWL4030_VIF_SLAVE_EN);
1953 break;
1954 case SND_SOC_DAIFMT_CBS_CFS:
1955 format |= TWL4030_VIF_SLAVE_EN;
1956 break;
1957 default:
1958 return -EINVAL;
1959 }
1960
1961 /* clock inversion */
1962 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
1963 case SND_SOC_DAIFMT_IB_NF:
1964 format &= ~(TWL4030_VIF_FORMAT);
1965 break;
1966 case SND_SOC_DAIFMT_NB_IF:
1967 format |= TWL4030_VIF_FORMAT;
1968 break;
1969 default:
1970 return -EINVAL;
1971 }
1972
1973 if (format != old_format) {
1974 /* change format and set CODECPDZ */
1975 twl4030_codec_enable(codec, 0);
1976 twl4030_write(codec, TWL4030_REG_VOICE_IF, format);
1977 twl4030_codec_enable(codec, 1);
1978 }
1979
1980 return 0;
1981}
1982
1446#define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000) 1983#define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000)
1447#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE) 1984#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE)
1448 1985
@@ -1454,21 +1991,47 @@ static struct snd_soc_dai_ops twl4030_dai_ops = {
1454 .set_fmt = twl4030_set_dai_fmt, 1991 .set_fmt = twl4030_set_dai_fmt,
1455}; 1992};
1456 1993
1457struct snd_soc_dai twl4030_dai = { 1994static struct snd_soc_dai_ops twl4030_dai_voice_ops = {
1995 .startup = twl4030_voice_startup,
1996 .shutdown = twl4030_voice_shutdown,
1997 .hw_params = twl4030_voice_hw_params,
1998 .set_sysclk = twl4030_voice_set_dai_sysclk,
1999 .set_fmt = twl4030_voice_set_dai_fmt,
2000};
2001
2002struct snd_soc_dai twl4030_dai[] = {
2003{
1458 .name = "twl4030", 2004 .name = "twl4030",
1459 .playback = { 2005 .playback = {
1460 .stream_name = "Playback", 2006 .stream_name = "HiFi Playback",
1461 .channels_min = 2, 2007 .channels_min = 2,
1462 .channels_max = 2, 2008 .channels_max = 4,
1463 .rates = TWL4030_RATES | SNDRV_PCM_RATE_96000, 2009 .rates = TWL4030_RATES | SNDRV_PCM_RATE_96000,
1464 .formats = TWL4030_FORMATS,}, 2010 .formats = TWL4030_FORMATS,},
1465 .capture = { 2011 .capture = {
1466 .stream_name = "Capture", 2012 .stream_name = "Capture",
1467 .channels_min = 2, 2013 .channels_min = 2,
1468 .channels_max = 2, 2014 .channels_max = 4,
1469 .rates = TWL4030_RATES, 2015 .rates = TWL4030_RATES,
1470 .formats = TWL4030_FORMATS,}, 2016 .formats = TWL4030_FORMATS,},
1471 .ops = &twl4030_dai_ops, 2017 .ops = &twl4030_dai_ops,
2018},
2019{
2020 .name = "twl4030 Voice",
2021 .playback = {
2022 .stream_name = "Voice Playback",
2023 .channels_min = 1,
2024 .channels_max = 1,
2025 .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
2026 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
2027 .capture = {
2028 .stream_name = "Capture",
2029 .channels_min = 1,
2030 .channels_max = 2,
2031 .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
2032 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
2033 .ops = &twl4030_dai_voice_ops,
2034},
1472}; 2035};
1473EXPORT_SYMBOL_GPL(twl4030_dai); 2036EXPORT_SYMBOL_GPL(twl4030_dai);
1474 2037
@@ -1500,6 +2063,8 @@ static int twl4030_resume(struct platform_device *pdev)
1500static int twl4030_init(struct snd_soc_device *socdev) 2063static int twl4030_init(struct snd_soc_device *socdev)
1501{ 2064{
1502 struct snd_soc_codec *codec = socdev->card->codec; 2065 struct snd_soc_codec *codec = socdev->card->codec;
2066 struct twl4030_setup_data *setup = socdev->codec_data;
2067 struct twl4030_priv *twl4030 = codec->private_data;
1503 int ret = 0; 2068 int ret = 0;
1504 2069
1505 printk(KERN_INFO "TWL4030 Audio Codec init \n"); 2070 printk(KERN_INFO "TWL4030 Audio Codec init \n");
@@ -1509,14 +2074,31 @@ static int twl4030_init(struct snd_soc_device *socdev)
1509 codec->read = twl4030_read_reg_cache; 2074 codec->read = twl4030_read_reg_cache;
1510 codec->write = twl4030_write; 2075 codec->write = twl4030_write;
1511 codec->set_bias_level = twl4030_set_bias_level; 2076 codec->set_bias_level = twl4030_set_bias_level;
1512 codec->dai = &twl4030_dai; 2077 codec->dai = twl4030_dai;
1513 codec->num_dai = 1; 2078 codec->num_dai = ARRAY_SIZE(twl4030_dai),
1514 codec->reg_cache_size = sizeof(twl4030_reg); 2079 codec->reg_cache_size = sizeof(twl4030_reg);
1515 codec->reg_cache = kmemdup(twl4030_reg, sizeof(twl4030_reg), 2080 codec->reg_cache = kmemdup(twl4030_reg, sizeof(twl4030_reg),
1516 GFP_KERNEL); 2081 GFP_KERNEL);
1517 if (codec->reg_cache == NULL) 2082 if (codec->reg_cache == NULL)
1518 return -ENOMEM; 2083 return -ENOMEM;
1519 2084
2085 /* Configuration for headset ramp delay from setup data */
2086 if (setup) {
2087 unsigned char hs_pop;
2088
2089 if (setup->sysclk)
2090 twl4030->sysclk = setup->sysclk;
2091 else
2092 twl4030->sysclk = 26000;
2093
2094 hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
2095 hs_pop &= ~TWL4030_RAMP_DELAY;
2096 hs_pop |= (setup->ramp_delay_value << 2);
2097 twl4030_write_reg_cache(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
2098 } else {
2099 twl4030->sysclk = 26000;
2100 }
2101
1520 /* register pcms */ 2102 /* register pcms */
1521 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 2103 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
1522 if (ret < 0) { 2104 if (ret < 0) {
@@ -1604,13 +2186,13 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_twl4030);
1604 2186
1605static int __init twl4030_modinit(void) 2187static int __init twl4030_modinit(void)
1606{ 2188{
1607 return snd_soc_register_dai(&twl4030_dai); 2189 return snd_soc_register_dais(&twl4030_dai[0], ARRAY_SIZE(twl4030_dai));
1608} 2190}
1609module_init(twl4030_modinit); 2191module_init(twl4030_modinit);
1610 2192
1611static void __exit twl4030_exit(void) 2193static void __exit twl4030_exit(void)
1612{ 2194{
1613 snd_soc_unregister_dai(&twl4030_dai); 2195 snd_soc_unregister_dais(&twl4030_dai[0], ARRAY_SIZE(twl4030_dai));
1614} 2196}
1615module_exit(twl4030_exit); 2197module_exit(twl4030_exit);
1616 2198
diff --git a/sound/soc/codecs/twl4030.h b/sound/soc/codecs/twl4030.h
index cb63765db1df..fe5f395d9e4f 100644
--- a/sound/soc/codecs/twl4030.h
+++ b/sound/soc/codecs/twl4030.h
@@ -92,8 +92,9 @@
92#define TWL4030_REG_VIBRA_PWM_SET 0x47 92#define TWL4030_REG_VIBRA_PWM_SET 0x47
93#define TWL4030_REG_ANAMIC_GAIN 0x48 93#define TWL4030_REG_ANAMIC_GAIN 0x48
94#define TWL4030_REG_MISC_SET_2 0x49 94#define TWL4030_REG_MISC_SET_2 0x49
95#define TWL4030_REG_SW_SHADOW 0x4A
95 96
96#define TWL4030_CACHEREGNUM (TWL4030_REG_MISC_SET_2 + 1) 97#define TWL4030_CACHEREGNUM (TWL4030_REG_SW_SHADOW + 1)
97 98
98/* Bitfield Definitions */ 99/* Bitfield Definitions */
99 100
@@ -110,9 +111,22 @@
110#define TWL4030_APLL_RATE_44100 0x90 111#define TWL4030_APLL_RATE_44100 0x90
111#define TWL4030_APLL_RATE_48000 0xA0 112#define TWL4030_APLL_RATE_48000 0xA0
112#define TWL4030_APLL_RATE_96000 0xE0 113#define TWL4030_APLL_RATE_96000 0xE0
113#define TWL4030_SEL_16K 0x04 114#define TWL4030_SEL_16K 0x08
114#define TWL4030_CODECPDZ 0x02 115#define TWL4030_CODECPDZ 0x02
115#define TWL4030_OPT_MODE 0x01 116#define TWL4030_OPT_MODE 0x01
117#define TWL4030_OPTION_1 (1 << 0)
118#define TWL4030_OPTION_2 (0 << 0)
119
120/* TWL4030_OPTION (0x02) Fields */
121
122#define TWL4030_ATXL1_EN (1 << 0)
123#define TWL4030_ATXR1_EN (1 << 1)
124#define TWL4030_ATXL2_VTXL_EN (1 << 2)
125#define TWL4030_ATXR2_VTXR_EN (1 << 3)
126#define TWL4030_ARXL1_VRX_EN (1 << 4)
127#define TWL4030_ARXR1_EN (1 << 5)
128#define TWL4030_ARXL2_EN (1 << 6)
129#define TWL4030_ARXR2_EN (1 << 7)
116 130
117/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */ 131/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */
118 132
@@ -171,6 +185,17 @@
171#define TWL4030_CLK256FS_EN 0x02 185#define TWL4030_CLK256FS_EN 0x02
172#define TWL4030_AIF_EN 0x01 186#define TWL4030_AIF_EN 0x01
173 187
188/* VOICE_IF (0x0F) Fields */
189
190#define TWL4030_VIF_SLAVE_EN 0x80
191#define TWL4030_VIF_DIN_EN 0x40
192#define TWL4030_VIF_DOUT_EN 0x20
193#define TWL4030_VIF_SWAP 0x10
194#define TWL4030_VIF_FORMAT 0x08
195#define TWL4030_VIF_TRI_EN 0x04
196#define TWL4030_VIF_SUB_EN 0x02
197#define TWL4030_VIF_EN 0x01
198
174/* EAR_CTL (0x21) */ 199/* EAR_CTL (0x21) */
175#define TWL4030_EAR_GAIN 0x30 200#define TWL4030_EAR_GAIN 0x30
176 201
@@ -236,7 +261,19 @@
236#define TWL4030_SMOOTH_ANAVOL_EN 0x02 261#define TWL4030_SMOOTH_ANAVOL_EN 0x02
237#define TWL4030_DIGMIC_LR_SWAP_EN 0x01 262#define TWL4030_DIGMIC_LR_SWAP_EN 0x01
238 263
239extern struct snd_soc_dai twl4030_dai; 264/* TWL4030_REG_SW_SHADOW (0x4A) Fields */
265#define TWL4030_HFL_EN 0x01
266#define TWL4030_HFR_EN 0x02
267
268#define TWL4030_DAI_HIFI 0
269#define TWL4030_DAI_VOICE 1
270
271extern struct snd_soc_dai twl4030_dai[2];
240extern struct snd_soc_codec_device soc_codec_dev_twl4030; 272extern struct snd_soc_codec_device soc_codec_dev_twl4030;
241 273
274struct twl4030_setup_data {
275 unsigned int ramp_delay_value;
276 unsigned int sysclk;
277};
278
242#endif /* End of __TWL4030_AUDIO_H__ */ 279#endif /* End of __TWL4030_AUDIO_H__ */
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index ddefb8f80145..269b108e1de6 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -101,7 +101,7 @@ static int uda134x_write(struct snd_soc_codec *codec, unsigned int reg,
101 pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value); 101 pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value);
102 102
103 if (reg >= UDA134X_REGS_NUM) { 103 if (reg >= UDA134X_REGS_NUM) {
104 printk(KERN_ERR "%s unkown register: reg: %d", 104 printk(KERN_ERR "%s unkown register: reg: %u",
105 __func__, reg); 105 __func__, reg);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
@@ -296,7 +296,7 @@ static int uda134x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
296 struct snd_soc_codec *codec = codec_dai->codec; 296 struct snd_soc_codec *codec = codec_dai->codec;
297 struct uda134x_priv *uda134x = codec->private_data; 297 struct uda134x_priv *uda134x = codec->private_data;
298 298
299 pr_debug("%s clk_id: %d, freq: %d, dir: %d\n", __func__, 299 pr_debug("%s clk_id: %d, freq: %u, dir: %d\n", __func__,
300 clk_id, freq, dir); 300 clk_id, freq, dir);
301 301
302 /* Anything between 256fs*8Khz and 512fs*48Khz should be acceptable 302 /* Anything between 256fs*8Khz and 512fs*48Khz should be acceptable
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 0275321ff8ab..e7348d341b76 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1108,7 +1108,7 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
1108 if (ret < 0) 1108 if (ret < 0)
1109 return ret; 1109 return ret;
1110 dev_dbg(wm8350->dev, 1110 dev_dbg(wm8350->dev,
1111 "FLL in %d FLL out %d N 0x%x K 0x%x div %d ratio %d", 1111 "FLL in %u FLL out %u N 0x%x K 0x%x div %d ratio %d",
1112 freq_in, freq_out, fll_div.n, fll_div.k, fll_div.div, 1112 freq_in, freq_out, fll_div.n, fll_div.k, fll_div.div,
1113 fll_div.ratio); 1113 fll_div.ratio);
1114 1114
diff --git a/sound/soc/codecs/wm8350.h b/sound/soc/codecs/wm8350.h
index d11bd9288cf9..d088eb4b88bb 100644
--- a/sound/soc/codecs/wm8350.h
+++ b/sound/soc/codecs/wm8350.h
@@ -13,6 +13,7 @@
13#define _WM8350_H 13#define _WM8350_H
14 14
15#include <sound/soc.h> 15#include <sound/soc.h>
16#include <linux/mfd/wm8350/audio.h>
16 17
17extern struct snd_soc_dai wm8350_dai; 18extern struct snd_soc_dai wm8350_dai;
18extern struct snd_soc_codec_device soc_codec_dev_wm8350; 19extern struct snd_soc_codec_device soc_codec_dev_wm8350;
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 510efa604008..502eefac1ecd 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -954,7 +954,7 @@ static int fll_factors(struct wm8400_priv *wm8400, struct fll_factors *factors,
954 factors->outdiv *= 2; 954 factors->outdiv *= 2;
955 if (factors->outdiv > 32) { 955 if (factors->outdiv > 32) {
956 dev_err(wm8400->wm8400->dev, 956 dev_err(wm8400->wm8400->dev,
957 "Unsupported FLL output frequency %dHz\n", 957 "Unsupported FLL output frequency %uHz\n",
958 Fout); 958 Fout);
959 return -EINVAL; 959 return -EINVAL;
960 } 960 }
@@ -1003,7 +1003,7 @@ static int fll_factors(struct wm8400_priv *wm8400, struct fll_factors *factors,
1003 factors->k = K / 10; 1003 factors->k = K / 10;
1004 1004
1005 dev_dbg(wm8400->wm8400->dev, 1005 dev_dbg(wm8400->wm8400->dev,
1006 "FLL: Fref=%d Fout=%d N=%x K=%x, FRATIO=%x OUTDIV=%x\n", 1006 "FLL: Fref=%u Fout=%u N=%x K=%x, FRATIO=%x OUTDIV=%x\n",
1007 Fref, Fout, 1007 Fref, Fout,
1008 factors->n, factors->k, factors->fratio, factors->outdiv); 1008 factors->n, factors->k, factors->fratio, factors->outdiv);
1009 1009
@@ -1473,8 +1473,8 @@ static int wm8400_codec_probe(struct platform_device *dev)
1473 1473
1474 codec = &priv->codec; 1474 codec = &priv->codec;
1475 codec->private_data = priv; 1475 codec->private_data = priv;
1476 codec->control_data = dev->dev.driver_data; 1476 codec->control_data = dev_get_drvdata(&dev->dev);
1477 priv->wm8400 = dev->dev.driver_data; 1477 priv->wm8400 = dev_get_drvdata(&dev->dev);
1478 1478
1479 ret = regulator_bulk_get(priv->wm8400->dev, 1479 ret = regulator_bulk_get(priv->wm8400->dev,
1480 ARRAY_SIZE(power), &power[0]); 1480 ARRAY_SIZE(power), &power[0]);
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 6a4cea09c45d..c8b8dba85890 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -298,7 +298,7 @@ static void pll_factors(unsigned int target, unsigned int source)
298 298
299 if ((Ndiv < 6) || (Ndiv > 12)) 299 if ((Ndiv < 6) || (Ndiv > 12))
300 printk(KERN_WARNING 300 printk(KERN_WARNING
301 "WM8510 N value %d outwith recommended range!d\n", 301 "WM8510 N value %u outwith recommended range!d\n",
302 Ndiv); 302 Ndiv);
303 303
304 pll_div.n = Ndiv; 304 pll_div.n = Ndiv;
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 9f6be3d31ac0..86c4b24db817 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -415,7 +415,7 @@ static int pll_factors(struct _pll_div *pll_div, unsigned int target,
415 unsigned int K, Ndiv, Nmod; 415 unsigned int K, Ndiv, Nmod;
416 int i; 416 int i;
417 417
418 pr_debug("wm8580: PLL %dHz->%dHz\n", source, target); 418 pr_debug("wm8580: PLL %uHz->%uHz\n", source, target);
419 419
420 /* Scale the output frequency up; the PLL should run in the 420 /* Scale the output frequency up; the PLL should run in the
421 * region of 90-100MHz. 421 * region of 90-100MHz.
@@ -447,7 +447,7 @@ static int pll_factors(struct _pll_div *pll_div, unsigned int target,
447 447
448 if ((Ndiv < 5) || (Ndiv > 13)) { 448 if ((Ndiv < 5) || (Ndiv > 13)) {
449 printk(KERN_ERR 449 printk(KERN_ERR
450 "WM8580 N=%d outside supported range\n", Ndiv); 450 "WM8580 N=%u outside supported range\n", Ndiv);
451 return -EINVAL; 451 return -EINVAL;
452 } 452 }
453 453
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index e043e3f60008..7a205876ef4f 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -666,14 +666,14 @@ static int __devinit wm8731_spi_probe(struct spi_device *spi)
666 codec->hw_write = (hw_write_t)wm8731_spi_write; 666 codec->hw_write = (hw_write_t)wm8731_spi_write;
667 codec->dev = &spi->dev; 667 codec->dev = &spi->dev;
668 668
669 spi->dev.driver_data = wm8731; 669 dev_set_drvdata(&spi->dev, wm8731);
670 670
671 return wm8731_register(wm8731); 671 return wm8731_register(wm8731);
672} 672}
673 673
674static int __devexit wm8731_spi_remove(struct spi_device *spi) 674static int __devexit wm8731_spi_remove(struct spi_device *spi)
675{ 675{
676 struct wm8731_priv *wm8731 = spi->dev.driver_data; 676 struct wm8731_priv *wm8731 = dev_get_drvdata(&spi->dev);
677 677
678 wm8731_unregister(wm8731); 678 wm8731_unregister(wm8731);
679 679
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a6e8f3f7f052..d28eeaceb857 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -703,7 +703,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int target,
703 703
704 if ((Ndiv < 6) || (Ndiv > 12)) 704 if ((Ndiv < 6) || (Ndiv > 12))
705 printk(KERN_WARNING 705 printk(KERN_WARNING
706 "wm8753: unsupported N = %d\n", Ndiv); 706 "wm8753: unsupported N = %u\n", Ndiv);
707 707
708 pll_div->n = Ndiv; 708 pll_div->n = Ndiv;
709 Nmod = target % source; 709 Nmod = target % source;
@@ -1822,14 +1822,14 @@ static int __devinit wm8753_spi_probe(struct spi_device *spi)
1822 codec->hw_write = (hw_write_t)wm8753_spi_write; 1822 codec->hw_write = (hw_write_t)wm8753_spi_write;
1823 codec->dev = &spi->dev; 1823 codec->dev = &spi->dev;
1824 1824
1825 spi->dev.driver_data = wm8753; 1825 dev_set_drvdata(&spi->dev, wm8753);
1826 1826
1827 return wm8753_register(wm8753); 1827 return wm8753_register(wm8753);
1828} 1828}
1829 1829
1830static int __devexit wm8753_spi_remove(struct spi_device *spi) 1830static int __devexit wm8753_spi_remove(struct spi_device *spi)
1831{ 1831{
1832 struct wm8753_priv *wm8753 = spi->dev.driver_data; 1832 struct wm8753_priv *wm8753 = dev_get_drvdata(&spi->dev);
1833 wm8753_unregister(wm8753); 1833 wm8753_unregister(wm8753);
1834 return 0; 1834 return 0;
1835} 1835}
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 46c5ea1ff921..3c78945244b8 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -778,11 +778,11 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
778 } 778 }
779 779
780 if (target > 100000000) 780 if (target > 100000000)
781 printk(KERN_WARNING "wm8900: FLL rate %d out of range, Fref=%d" 781 printk(KERN_WARNING "wm8900: FLL rate %u out of range, Fref=%u"
782 " Fout=%d\n", target, Fref, Fout); 782 " Fout=%u\n", target, Fref, Fout);
783 if (div > 32) { 783 if (div > 32) {
784 printk(KERN_ERR "wm8900: Invalid FLL division rate %u, " 784 printk(KERN_ERR "wm8900: Invalid FLL division rate %u, "
785 "Fref=%d, Fout=%d, target=%d\n", 785 "Fref=%u, Fout=%u, target=%u\n",
786 div, Fref, Fout, target); 786 div, Fref, Fout, target);
787 return -EINVAL; 787 return -EINVAL;
788 } 788 }
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 8cf571f1a803..d8a9222fbf74 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -217,7 +217,6 @@ struct wm8903_priv {
217 int sysclk; 217 int sysclk;
218 218
219 /* Reference counts */ 219 /* Reference counts */
220 int charge_pump_users;
221 int class_w_users; 220 int class_w_users;
222 int playback_active; 221 int playback_active;
223 int capture_active; 222 int capture_active;
@@ -373,6 +372,15 @@ static void wm8903_reset(struct snd_soc_codec *codec)
373#define WM8903_OUTPUT_INT 0x2 372#define WM8903_OUTPUT_INT 0x2
374#define WM8903_OUTPUT_IN 0x1 373#define WM8903_OUTPUT_IN 0x1
375 374
375static int wm8903_cp_event(struct snd_soc_dapm_widget *w,
376 struct snd_kcontrol *kcontrol, int event)
377{
378 WARN_ON(event != SND_SOC_DAPM_POST_PMU);
379 mdelay(4);
380
381 return 0;
382}
383
376/* 384/*
377 * Event for headphone and line out amplifier power changes. Special 385 * Event for headphone and line out amplifier power changes. Special
378 * power up/down sequences are required in order to maximise pop/click 386 * power up/down sequences are required in order to maximise pop/click
@@ -382,19 +390,20 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
382 struct snd_kcontrol *kcontrol, int event) 390 struct snd_kcontrol *kcontrol, int event)
383{ 391{
384 struct snd_soc_codec *codec = w->codec; 392 struct snd_soc_codec *codec = w->codec;
385 struct wm8903_priv *wm8903 = codec->private_data;
386 struct i2c_client *i2c = codec->control_data;
387 u16 val; 393 u16 val;
388 u16 reg; 394 u16 reg;
395 u16 dcs_reg;
396 u16 dcs_bit;
389 int shift; 397 int shift;
390 u16 cp_reg = wm8903_read(codec, WM8903_CHARGE_PUMP_0);
391 398
392 switch (w->reg) { 399 switch (w->reg) {
393 case WM8903_POWER_MANAGEMENT_2: 400 case WM8903_POWER_MANAGEMENT_2:
394 reg = WM8903_ANALOGUE_HP_0; 401 reg = WM8903_ANALOGUE_HP_0;
402 dcs_bit = 0 + w->shift;
395 break; 403 break;
396 case WM8903_POWER_MANAGEMENT_3: 404 case WM8903_POWER_MANAGEMENT_3:
397 reg = WM8903_ANALOGUE_LINEOUT_0; 405 reg = WM8903_ANALOGUE_LINEOUT_0;
406 dcs_bit = 2 + w->shift;
398 break; 407 break;
399 default: 408 default:
400 BUG(); 409 BUG();
@@ -419,18 +428,6 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
419 /* Short the output */ 428 /* Short the output */
420 val &= ~(WM8903_OUTPUT_SHORT << shift); 429 val &= ~(WM8903_OUTPUT_SHORT << shift);
421 wm8903_write(codec, reg, val); 430 wm8903_write(codec, reg, val);
422
423 wm8903->charge_pump_users++;
424
425 dev_dbg(&i2c->dev, "Charge pump use count now %d\n",
426 wm8903->charge_pump_users);
427
428 if (wm8903->charge_pump_users == 1) {
429 dev_dbg(&i2c->dev, "Enabling charge pump\n");
430 wm8903_write(codec, WM8903_CHARGE_PUMP_0,
431 cp_reg | WM8903_CP_ENA);
432 mdelay(4);
433 }
434 } 431 }
435 432
436 if (event & SND_SOC_DAPM_POST_PMU) { 433 if (event & SND_SOC_DAPM_POST_PMU) {
@@ -446,6 +443,11 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
446 val |= (WM8903_OUTPUT_OUT << shift); 443 val |= (WM8903_OUTPUT_OUT << shift);
447 wm8903_write(codec, reg, val); 444 wm8903_write(codec, reg, val);
448 445
446 /* Enable the DC servo */
447 dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0);
448 dcs_reg |= dcs_bit;
449 wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg);
450
449 /* Remove the short */ 451 /* Remove the short */
450 val |= (WM8903_OUTPUT_SHORT << shift); 452 val |= (WM8903_OUTPUT_SHORT << shift);
451 wm8903_write(codec, reg, val); 453 wm8903_write(codec, reg, val);
@@ -458,25 +460,17 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
458 val &= ~(WM8903_OUTPUT_SHORT << shift); 460 val &= ~(WM8903_OUTPUT_SHORT << shift);
459 wm8903_write(codec, reg, val); 461 wm8903_write(codec, reg, val);
460 462
463 /* Disable the DC servo */
464 dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0);
465 dcs_reg &= ~dcs_bit;
466 wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg);
467
461 /* Then disable the intermediate and output stages */ 468 /* Then disable the intermediate and output stages */
462 val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT | 469 val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT |
463 WM8903_OUTPUT_IN) << shift); 470 WM8903_OUTPUT_IN) << shift);
464 wm8903_write(codec, reg, val); 471 wm8903_write(codec, reg, val);
465 } 472 }
466 473
467 if (event & SND_SOC_DAPM_POST_PMD) {
468 wm8903->charge_pump_users--;
469
470 dev_dbg(&i2c->dev, "Charge pump use count now %d\n",
471 wm8903->charge_pump_users);
472
473 if (wm8903->charge_pump_users == 0) {
474 dev_dbg(&i2c->dev, "Disabling charge pump\n");
475 wm8903_write(codec, WM8903_CHARGE_PUMP_0,
476 cp_reg & ~WM8903_CP_ENA);
477 }
478 }
479
480 return 0; 474 return 0;
481} 475}
482 476
@@ -539,6 +533,7 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
539/* ALSA can only do steps of .01dB */ 533/* ALSA can only do steps of .01dB */
540static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); 534static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
541 535
536static const DECLARE_TLV_DB_SCALE(digital_sidetone_tlv, -3600, 300, 0);
542static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); 537static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
543 538
544static const DECLARE_TLV_DB_SCALE(drc_tlv_thresh, 0, 75, 0); 539static const DECLARE_TLV_DB_SCALE(drc_tlv_thresh, 0, 75, 0);
@@ -657,6 +652,16 @@ static const struct soc_enum rinput_inv_enum =
657 SOC_ENUM_SINGLE(WM8903_ANALOGUE_RIGHT_INPUT_1, 4, 3, rinput_mux_text); 652 SOC_ENUM_SINGLE(WM8903_ANALOGUE_RIGHT_INPUT_1, 4, 3, rinput_mux_text);
658 653
659 654
655static const char *sidetone_text[] = {
656 "None", "Left", "Right"
657};
658
659static const struct soc_enum lsidetone_enum =
660 SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_0, 2, 3, sidetone_text);
661
662static const struct soc_enum rsidetone_enum =
663 SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_0, 0, 3, sidetone_text);
664
660static const struct snd_kcontrol_new wm8903_snd_controls[] = { 665static const struct snd_kcontrol_new wm8903_snd_controls[] = {
661 666
662/* Input PGAs - No TLV since the scale depends on PGA mode */ 667/* Input PGAs - No TLV since the scale depends on PGA mode */
@@ -700,6 +705,9 @@ SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
700SOC_ENUM("ADC Companding Mode", adc_companding), 705SOC_ENUM("ADC Companding Mode", adc_companding),
701SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), 706SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
702 707
708SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8903_DAC_DIGITAL_0, 4, 8,
709 12, 0, digital_sidetone_tlv),
710
703/* DAC */ 711/* DAC */
704SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT, 712SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT,
705 WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), 713 WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
@@ -762,6 +770,12 @@ static const struct snd_kcontrol_new rinput_mux =
762static const struct snd_kcontrol_new rinput_inv_mux = 770static const struct snd_kcontrol_new rinput_inv_mux =
763 SOC_DAPM_ENUM("Right Inverting Input Mux", rinput_inv_enum); 771 SOC_DAPM_ENUM("Right Inverting Input Mux", rinput_inv_enum);
764 772
773static const struct snd_kcontrol_new lsidetone_mux =
774 SOC_DAPM_ENUM("DACL Sidetone Mux", lsidetone_enum);
775
776static const struct snd_kcontrol_new rsidetone_mux =
777 SOC_DAPM_ENUM("DACR Sidetone Mux", rsidetone_enum);
778
765static const struct snd_kcontrol_new left_output_mixer[] = { 779static const struct snd_kcontrol_new left_output_mixer[] = {
766SOC_DAPM_SINGLE("DACL Switch", WM8903_ANALOGUE_LEFT_MIX_0, 3, 1, 0), 780SOC_DAPM_SINGLE("DACL Switch", WM8903_ANALOGUE_LEFT_MIX_0, 3, 1, 0),
767SOC_DAPM_SINGLE("DACR Switch", WM8903_ANALOGUE_LEFT_MIX_0, 2, 1, 0), 781SOC_DAPM_SINGLE("DACR Switch", WM8903_ANALOGUE_LEFT_MIX_0, 2, 1, 0),
@@ -828,6 +842,9 @@ SND_SOC_DAPM_PGA("Right Input PGA", WM8903_POWER_MANAGEMENT_0, 0, 0, NULL, 0),
828SND_SOC_DAPM_ADC("ADCL", "Left HiFi Capture", WM8903_POWER_MANAGEMENT_6, 1, 0), 842SND_SOC_DAPM_ADC("ADCL", "Left HiFi Capture", WM8903_POWER_MANAGEMENT_6, 1, 0),
829SND_SOC_DAPM_ADC("ADCR", "Right HiFi Capture", WM8903_POWER_MANAGEMENT_6, 0, 0), 843SND_SOC_DAPM_ADC("ADCR", "Right HiFi Capture", WM8903_POWER_MANAGEMENT_6, 0, 0),
830 844
845SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &lsidetone_mux),
846SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &rsidetone_mux),
847
831SND_SOC_DAPM_DAC("DACL", "Left Playback", WM8903_POWER_MANAGEMENT_6, 3, 0), 848SND_SOC_DAPM_DAC("DACL", "Left Playback", WM8903_POWER_MANAGEMENT_6, 3, 0),
832SND_SOC_DAPM_DAC("DACR", "Right Playback", WM8903_POWER_MANAGEMENT_6, 2, 0), 849SND_SOC_DAPM_DAC("DACR", "Right Playback", WM8903_POWER_MANAGEMENT_6, 2, 0),
833 850
@@ -844,26 +861,29 @@ SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
844SND_SOC_DAPM_PGA_E("Left Headphone Output PGA", WM8903_POWER_MANAGEMENT_2, 861SND_SOC_DAPM_PGA_E("Left Headphone Output PGA", WM8903_POWER_MANAGEMENT_2,
845 1, 0, NULL, 0, wm8903_output_event, 862 1, 0, NULL, 0, wm8903_output_event,
846 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 863 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
847 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), 864 SND_SOC_DAPM_PRE_PMD),
848SND_SOC_DAPM_PGA_E("Right Headphone Output PGA", WM8903_POWER_MANAGEMENT_2, 865SND_SOC_DAPM_PGA_E("Right Headphone Output PGA", WM8903_POWER_MANAGEMENT_2,
849 0, 0, NULL, 0, wm8903_output_event, 866 0, 0, NULL, 0, wm8903_output_event,
850 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 867 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
851 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), 868 SND_SOC_DAPM_PRE_PMD),
852 869
853SND_SOC_DAPM_PGA_E("Left Line Output PGA", WM8903_POWER_MANAGEMENT_3, 1, 0, 870SND_SOC_DAPM_PGA_E("Left Line Output PGA", WM8903_POWER_MANAGEMENT_3, 1, 0,
854 NULL, 0, wm8903_output_event, 871 NULL, 0, wm8903_output_event,
855 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 872 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
856 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), 873 SND_SOC_DAPM_PRE_PMD),
857SND_SOC_DAPM_PGA_E("Right Line Output PGA", WM8903_POWER_MANAGEMENT_3, 0, 0, 874SND_SOC_DAPM_PGA_E("Right Line Output PGA", WM8903_POWER_MANAGEMENT_3, 0, 0,
858 NULL, 0, wm8903_output_event, 875 NULL, 0, wm8903_output_event,
859 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 876 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
860 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), 877 SND_SOC_DAPM_PRE_PMD),
861 878
862SND_SOC_DAPM_PGA("Left Speaker PGA", WM8903_POWER_MANAGEMENT_5, 1, 0, 879SND_SOC_DAPM_PGA("Left Speaker PGA", WM8903_POWER_MANAGEMENT_5, 1, 0,
863 NULL, 0), 880 NULL, 0),
864SND_SOC_DAPM_PGA("Right Speaker PGA", WM8903_POWER_MANAGEMENT_5, 0, 0, 881SND_SOC_DAPM_PGA("Right Speaker PGA", WM8903_POWER_MANAGEMENT_5, 0, 0,
865 NULL, 0), 882 NULL, 0),
866 883
884SND_SOC_DAPM_SUPPLY("Charge Pump", WM8903_CHARGE_PUMP_0, 0, 0,
885 wm8903_cp_event, SND_SOC_DAPM_POST_PMU),
886SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8903_CLOCK_RATES_2, 1, 0, NULL, 0),
867}; 887};
868 888
869static const struct snd_soc_dapm_route intercon[] = { 889static const struct snd_soc_dapm_route intercon[] = {
@@ -909,7 +929,19 @@ static const struct snd_soc_dapm_route intercon[] = {
909 { "Right Input PGA", NULL, "Right Input Mode Mux" }, 929 { "Right Input PGA", NULL, "Right Input Mode Mux" },
910 930
911 { "ADCL", NULL, "Left Input PGA" }, 931 { "ADCL", NULL, "Left Input PGA" },
932 { "ADCL", NULL, "CLK_DSP" },
912 { "ADCR", NULL, "Right Input PGA" }, 933 { "ADCR", NULL, "Right Input PGA" },
934 { "ADCR", NULL, "CLK_DSP" },
935
936 { "DACL Sidetone", "Left", "ADCL" },
937 { "DACL Sidetone", "Right", "ADCR" },
938 { "DACR Sidetone", "Left", "ADCL" },
939 { "DACR Sidetone", "Right", "ADCR" },
940
941 { "DACL", NULL, "DACL Sidetone" },
942 { "DACL", NULL, "CLK_DSP" },
943 { "DACR", NULL, "DACR Sidetone" },
944 { "DACR", NULL, "CLK_DSP" },
913 945
914 { "Left Output Mixer", "Left Bypass Switch", "Left Input PGA" }, 946 { "Left Output Mixer", "Left Bypass Switch", "Left Input PGA" },
915 { "Left Output Mixer", "Right Bypass Switch", "Right Input PGA" }, 947 { "Left Output Mixer", "Right Bypass Switch", "Right Input PGA" },
@@ -951,6 +983,11 @@ static const struct snd_soc_dapm_route intercon[] = {
951 983
952 { "ROP", NULL, "Right Speaker PGA" }, 984 { "ROP", NULL, "Right Speaker PGA" },
953 { "RON", NULL, "Right Speaker PGA" }, 985 { "RON", NULL, "Right Speaker PGA" },
986
987 { "Left Headphone Output PGA", NULL, "Charge Pump" },
988 { "Right Headphone Output PGA", NULL, "Charge Pump" },
989 { "Left Line Output PGA", NULL, "Charge Pump" },
990 { "Right Line Output PGA", NULL, "Charge Pump" },
954}; 991};
955 992
956static int wm8903_add_widgets(struct snd_soc_codec *codec) 993static int wm8903_add_widgets(struct snd_soc_codec *codec)
@@ -985,6 +1022,11 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec,
985 wm8903_write(codec, WM8903_CLOCK_RATES_2, 1022 wm8903_write(codec, WM8903_CLOCK_RATES_2,
986 WM8903_CLK_SYS_ENA); 1023 WM8903_CLK_SYS_ENA);
987 1024
1025 /* Change DC servo dither level in startup sequence */
1026 wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11);
1027 wm8903_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257);
1028 wm8903_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2);
1029
988 wm8903_run_sequence(codec, 0); 1030 wm8903_run_sequence(codec, 0);
989 wm8903_sync_reg_cache(codec, codec->reg_cache); 1031 wm8903_sync_reg_cache(codec, codec->reg_cache);
990 1032
@@ -1277,14 +1319,8 @@ static int wm8903_startup(struct snd_pcm_substream *substream,
1277 if (wm8903->master_substream) { 1319 if (wm8903->master_substream) {
1278 master_runtime = wm8903->master_substream->runtime; 1320 master_runtime = wm8903->master_substream->runtime;
1279 1321
1280 dev_dbg(&i2c->dev, "Constraining to %d bits at %dHz\n", 1322 dev_dbg(&i2c->dev, "Constraining to %d bits\n",
1281 master_runtime->sample_bits, 1323 master_runtime->sample_bits);
1282 master_runtime->rate);
1283
1284 snd_pcm_hw_constraint_minmax(substream->runtime,
1285 SNDRV_PCM_HW_PARAM_RATE,
1286 master_runtime->rate,
1287 master_runtime->rate);
1288 1324
1289 snd_pcm_hw_constraint_minmax(substream->runtime, 1325 snd_pcm_hw_constraint_minmax(substream->runtime,
1290 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, 1326 SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
@@ -1523,6 +1559,7 @@ struct snd_soc_dai wm8903_dai = {
1523 .formats = WM8903_FORMATS, 1559 .formats = WM8903_FORMATS,
1524 }, 1560 },
1525 .ops = &wm8903_dai_ops, 1561 .ops = &wm8903_dai_ops,
1562 .symmetric_rates = 1,
1526}; 1563};
1527EXPORT_SYMBOL_GPL(wm8903_dai); 1564EXPORT_SYMBOL_GPL(wm8903_dai);
1528 1565
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
new file mode 100644
index 000000000000..b8e17d6bc1f7
--- /dev/null
+++ b/sound/soc/codecs/wm8940.c
@@ -0,0 +1,955 @@
1/*
2 * wm8940.c -- WM8940 ALSA Soc Audio driver
3 *
4 * Author: Jonathan Cameron <jic23@cam.ac.uk>
5 *
6 * Based on wm8510.c
7 * Copyright 2006 Wolfson Microelectronics PLC.
8 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Not currently handled:
15 * Notch filter control
16 * AUXMode (inverting vs mixer)
17 * No means to obtain current gain if alc enabled.
18 * No use made of gpio
19 * Fast VMID discharge for power down
20 * Soft Start
21 * DLR and ALR Swaps not enabled
22 * Digital Sidetone not supported
23 */
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/pm.h>
30#include <linux/i2c.h>
31#include <linux/platform_device.h>
32#include <linux/spi/spi.h>
33#include <sound/core.h>
34#include <sound/pcm.h>
35#include <sound/pcm_params.h>
36#include <sound/soc.h>
37#include <sound/soc-dapm.h>
38#include <sound/initval.h>
39#include <sound/tlv.h>
40
41#include "wm8940.h"
42
43struct wm8940_priv {
44 unsigned int sysclk;
45 u16 reg_cache[WM8940_CACHEREGNUM];
46 struct snd_soc_codec codec;
47};
48
49static u16 wm8940_reg_defaults[] = {
50 0x8940, /* Soft Reset */
51 0x0000, /* Power 1 */
52 0x0000, /* Power 2 */
53 0x0000, /* Power 3 */
54 0x0010, /* Interface Control */
55 0x0000, /* Companding Control */
56 0x0140, /* Clock Control */
57 0x0000, /* Additional Controls */
58 0x0000, /* GPIO Control */
59 0x0002, /* Auto Increment Control */
60 0x0000, /* DAC Control */
61 0x00FF, /* DAC Volume */
62 0,
63 0,
64 0x0100, /* ADC Control */
65 0x00FF, /* ADC Volume */
66 0x0000, /* Notch Filter 1 Control 1 */
67 0x0000, /* Notch Filter 1 Control 2 */
68 0x0000, /* Notch Filter 2 Control 1 */
69 0x0000, /* Notch Filter 2 Control 2 */
70 0x0000, /* Notch Filter 3 Control 1 */
71 0x0000, /* Notch Filter 3 Control 2 */
72 0x0000, /* Notch Filter 4 Control 1 */
73 0x0000, /* Notch Filter 4 Control 2 */
74 0x0032, /* DAC Limit Control 1 */
75 0x0000, /* DAC Limit Control 2 */
76 0,
77 0,
78 0,
79 0,
80 0,
81 0,
82 0x0038, /* ALC Control 1 */
83 0x000B, /* ALC Control 2 */
84 0x0032, /* ALC Control 3 */
85 0x0000, /* Noise Gate */
86 0x0041, /* PLLN */
87 0x000C, /* PLLK1 */
88 0x0093, /* PLLK2 */
89 0x00E9, /* PLLK3 */
90 0,
91 0,
92 0x0030, /* ALC Control 4 */
93 0,
94 0x0002, /* Input Control */
95 0x0050, /* PGA Gain */
96 0,
97 0x0002, /* ADC Boost Control */
98 0,
99 0x0002, /* Output Control */
100 0x0000, /* Speaker Mixer Control */
101 0,
102 0,
103 0,
104 0x0079, /* Speaker Volume */
105 0,
106 0x0000, /* Mono Mixer Control */
107};
108
109static inline unsigned int wm8940_read_reg_cache(struct snd_soc_codec *codec,
110 unsigned int reg)
111{
112 u16 *cache = codec->reg_cache;
113
114 if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
115 return -1;
116
117 return cache[reg];
118}
119
120static inline int wm8940_write_reg_cache(struct snd_soc_codec *codec,
121 u16 reg, unsigned int value)
122{
123 u16 *cache = codec->reg_cache;
124
125 if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
126 return -1;
127
128 cache[reg] = value;
129
130 return 0;
131}
132
133static int wm8940_write(struct snd_soc_codec *codec, unsigned int reg,
134 unsigned int value)
135{
136 int ret;
137 u8 data[3] = { reg,
138 (value & 0xff00) >> 8,
139 (value & 0x00ff)
140 };
141
142 wm8940_write_reg_cache(codec, reg, value);
143
144 ret = codec->hw_write(codec->control_data, data, 3);
145
146 if (ret < 0)
147 return ret;
148 else if (ret != 3)
149 return -EIO;
150 return 0;
151}
152
153static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" };
154static const struct soc_enum wm8940_adc_companding_enum
155= SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding);
156static const struct soc_enum wm8940_dac_companding_enum
157= SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 3, 4, wm8940_companding);
158
159static const char *wm8940_alc_mode_text[] = {"ALC", "Limiter"};
160static const struct soc_enum wm8940_alc_mode_enum
161= SOC_ENUM_SINGLE(WM8940_ALC3, 8, 2, wm8940_alc_mode_text);
162
163static const char *wm8940_mic_bias_level_text[] = {"0.9", "0.65"};
164static const struct soc_enum wm8940_mic_bias_level_enum
165= SOC_ENUM_SINGLE(WM8940_INPUTCTL, 8, 2, wm8940_mic_bias_level_text);
166
167static const char *wm8940_filter_mode_text[] = {"Audio", "Application"};
168static const struct soc_enum wm8940_filter_mode_enum
169= SOC_ENUM_SINGLE(WM8940_ADC, 7, 2, wm8940_filter_mode_text);
170
171static DECLARE_TLV_DB_SCALE(wm8940_spk_vol_tlv, -5700, 100, 1);
172static DECLARE_TLV_DB_SCALE(wm8940_att_tlv, -1000, 1000, 0);
173static DECLARE_TLV_DB_SCALE(wm8940_pga_vol_tlv, -1200, 75, 0);
174static DECLARE_TLV_DB_SCALE(wm8940_alc_min_tlv, -1200, 600, 0);
175static DECLARE_TLV_DB_SCALE(wm8940_alc_max_tlv, 675, 600, 0);
176static DECLARE_TLV_DB_SCALE(wm8940_alc_tar_tlv, -2250, 50, 0);
177static DECLARE_TLV_DB_SCALE(wm8940_lim_boost_tlv, 0, 100, 0);
178static DECLARE_TLV_DB_SCALE(wm8940_lim_thresh_tlv, -600, 100, 0);
179static DECLARE_TLV_DB_SCALE(wm8940_adc_tlv, -12750, 50, 1);
180static DECLARE_TLV_DB_SCALE(wm8940_capture_boost_vol_tlv, 0, 2000, 0);
181
182static const struct snd_kcontrol_new wm8940_snd_controls[] = {
183 SOC_SINGLE("Digital Loopback Switch", WM8940_COMPANDINGCTL,
184 6, 1, 0),
185 SOC_ENUM("DAC Companding", wm8940_dac_companding_enum),
186 SOC_ENUM("ADC Companding", wm8940_adc_companding_enum),
187
188 SOC_ENUM("ALC Mode", wm8940_alc_mode_enum),
189 SOC_SINGLE("ALC Switch", WM8940_ALC1, 8, 1, 0),
190 SOC_SINGLE_TLV("ALC Capture Max Gain", WM8940_ALC1,
191 3, 7, 1, wm8940_alc_max_tlv),
192 SOC_SINGLE_TLV("ALC Capture Min Gain", WM8940_ALC1,
193 0, 7, 0, wm8940_alc_min_tlv),
194 SOC_SINGLE_TLV("ALC Capture Target", WM8940_ALC2,
195 0, 14, 0, wm8940_alc_tar_tlv),
196 SOC_SINGLE("ALC Capture Hold", WM8940_ALC2, 4, 10, 0),
197 SOC_SINGLE("ALC Capture Decay", WM8940_ALC3, 4, 10, 0),
198 SOC_SINGLE("ALC Capture Attach", WM8940_ALC3, 0, 10, 0),
199 SOC_SINGLE("ALC ZC Switch", WM8940_ALC4, 1, 1, 0),
200 SOC_SINGLE("ALC Capture Noise Gate Switch", WM8940_NOISEGATE,
201 3, 1, 0),
202 SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8940_NOISEGATE,
203 0, 7, 0),
204
205 SOC_SINGLE("DAC Playback Limiter Switch", WM8940_DACLIM1, 8, 1, 0),
206 SOC_SINGLE("DAC Playback Limiter Attack", WM8940_DACLIM1, 0, 9, 0),
207 SOC_SINGLE("DAC Playback Limiter Decay", WM8940_DACLIM1, 4, 11, 0),
208 SOC_SINGLE_TLV("DAC Playback Limiter Threshold", WM8940_DACLIM2,
209 4, 9, 1, wm8940_lim_thresh_tlv),
210 SOC_SINGLE_TLV("DAC Playback Limiter Boost", WM8940_DACLIM2,
211 0, 12, 0, wm8940_lim_boost_tlv),
212
213 SOC_SINGLE("Capture PGA ZC Switch", WM8940_PGAGAIN, 7, 1, 0),
214 SOC_SINGLE_TLV("Capture PGA Volume", WM8940_PGAGAIN,
215 0, 63, 0, wm8940_pga_vol_tlv),
216 SOC_SINGLE_TLV("Digital Playback Volume", WM8940_DACVOL,
217 0, 255, 0, wm8940_adc_tlv),
218 SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL,
219 0, 255, 0, wm8940_adc_tlv),
220 SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum),
221 SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST,
222 8, 1, 0, wm8940_capture_boost_vol_tlv),
223 SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL,
224 0, 63, 0, wm8940_spk_vol_tlv),
225 SOC_SINGLE("Speaker Playback Switch", WM8940_SPKVOL, 6, 1, 1),
226
227 SOC_SINGLE_TLV("Speaker Mixer Line Bypass Volume", WM8940_SPKVOL,
228 8, 1, 1, wm8940_att_tlv),
229 SOC_SINGLE("Speaker Playback ZC Switch", WM8940_SPKVOL, 7, 1, 0),
230
231 SOC_SINGLE("Mono Out Switch", WM8940_MONOMIX, 6, 1, 1),
232 SOC_SINGLE_TLV("Mono Mixer Line Bypass Volume", WM8940_MONOMIX,
233 7, 1, 1, wm8940_att_tlv),
234
235 SOC_SINGLE("High Pass Filter Switch", WM8940_ADC, 8, 1, 0),
236 SOC_ENUM("High Pass Filter Mode", wm8940_filter_mode_enum),
237 SOC_SINGLE("High Pass Filter Cut Off", WM8940_ADC, 4, 7, 0),
238 SOC_SINGLE("ADC Inversion Switch", WM8940_ADC, 0, 1, 0),
239 SOC_SINGLE("DAC Inversion Switch", WM8940_DAC, 0, 1, 0),
240 SOC_SINGLE("DAC Auto Mute Switch", WM8940_DAC, 2, 1, 0),
241 SOC_SINGLE("ZC Timeout Clock Switch", WM8940_ADDCNTRL, 0, 1, 0),
242};
243
/* Switch controls attached to the DAPM "Speaker Mixer" widget. */
static const struct snd_kcontrol_new wm8940_speaker_mixer_controls[] = {
	SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_SPKMIX, 1, 1, 0),
	SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_SPKMIX, 5, 1, 0),
	SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_SPKMIX, 0, 1, 0),
};

/* Switch controls attached to the DAPM "Mono Mixer" widget. */
static const struct snd_kcontrol_new wm8940_mono_mixer_controls[] = {
	SOC_DAPM_SINGLE("Line Bypass Switch", WM8940_MONOMIX, 1, 1, 0),
	SOC_DAPM_SINGLE("Aux Playback Switch", WM8940_MONOMIX, 2, 1, 0),
	SOC_DAPM_SINGLE("PCM Playback Switch", WM8940_MONOMIX, 0, 1, 0),
};

/* Input boost gain: -15dB minimum (mute step), 3dB per step. */
static DECLARE_TLV_DB_SCALE(wm8940_boost_vol_tlv, -1500, 300, 1);
/* Controls attached to the DAPM input "Boost Mixer" widget. */
static const struct snd_kcontrol_new wm8940_input_boost_controls[] = {
	SOC_DAPM_SINGLE("Mic PGA Switch", WM8940_PGAGAIN, 6, 1, 1),
	SOC_DAPM_SINGLE_TLV("Aux Volume", WM8940_ADCBOOST,
			    0, 7, 0, wm8940_boost_vol_tlv),
	SOC_DAPM_SINGLE_TLV("Mic Volume", WM8940_ADCBOOST,
			    4, 7, 0, wm8940_boost_vol_tlv),
};

/* Input-select switches for the DAPM "Mic PGA" widget. */
static const struct snd_kcontrol_new wm8940_micpga_controls[] = {
	SOC_DAPM_SINGLE("AUX Switch", WM8940_INPUTCTL, 2, 1, 0),
	SOC_DAPM_SINGLE("MICP Switch", WM8940_INPUTCTL, 0, 1, 0),
	SOC_DAPM_SINGLE("MICN Switch", WM8940_INPUTCTL, 1, 1, 0),
};
270
/* DAPM widget graph; power-control bits live in WM8940_POWER1..POWER3. */
static const struct snd_soc_dapm_widget wm8940_dapm_widgets[] = {
	/* Playback path: DAC -> speaker/mono mixers -> output drivers */
	SND_SOC_DAPM_MIXER("Speaker Mixer", WM8940_POWER3, 2, 0,
			   &wm8940_speaker_mixer_controls[0],
			   ARRAY_SIZE(wm8940_speaker_mixer_controls)),
	SND_SOC_DAPM_MIXER("Mono Mixer", WM8940_POWER3, 3, 0,
			   &wm8940_mono_mixer_controls[0],
			   ARRAY_SIZE(wm8940_mono_mixer_controls)),
	SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8940_POWER3, 0, 0),

	SND_SOC_DAPM_PGA("SpkN Out", WM8940_POWER3, 5, 0, NULL, 0),
	SND_SOC_DAPM_PGA("SpkP Out", WM8940_POWER3, 6, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Mono Out", WM8940_POWER3, 7, 0, NULL, 0),
	/* Physical output pins */
	SND_SOC_DAPM_OUTPUT("MONOOUT"),
	SND_SOC_DAPM_OUTPUT("SPKOUTP"),
	SND_SOC_DAPM_OUTPUT("SPKOUTN"),

	/* Capture path: input pins -> Mic PGA / Aux -> Boost Mixer -> ADC */
	SND_SOC_DAPM_PGA("Aux Input", WM8940_POWER1, 6, 0, NULL, 0),
	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8940_POWER2, 0, 0),
	SND_SOC_DAPM_MIXER("Mic PGA", WM8940_POWER2, 2, 0,
			   &wm8940_micpga_controls[0],
			   ARRAY_SIZE(wm8940_micpga_controls)),
	SND_SOC_DAPM_MIXER("Boost Mixer", WM8940_POWER2, 4, 0,
			   &wm8940_input_boost_controls[0],
			   ARRAY_SIZE(wm8940_input_boost_controls)),
	SND_SOC_DAPM_MICBIAS("Mic Bias", WM8940_POWER1, 4, 0),

	/* Physical input pins */
	SND_SOC_DAPM_INPUT("MICN"),
	SND_SOC_DAPM_INPUT("MICP"),
	SND_SOC_DAPM_INPUT("AUX"),
};
301
/* DAPM routes: { sink, control name (NULL = unconditional), source }. */
static const struct snd_soc_dapm_route audio_map[] = {
	/* Mono output mixer */
	{"Mono Mixer", "PCM Playback Switch", "DAC"},
	{"Mono Mixer", "Aux Playback Switch", "Aux Input"},
	{"Mono Mixer", "Line Bypass Switch", "Boost Mixer"},

	/* Speaker output mixer */
	{"Speaker Mixer", "PCM Playback Switch", "DAC"},
	{"Speaker Mixer", "Aux Playback Switch", "Aux Input"},
	{"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"},

	/* Outputs */
	{"Mono Out", NULL, "Mono Mixer"},
	{"MONOOUT", NULL, "Mono Out"},
	{"SpkN Out", NULL, "Speaker Mixer"},
	{"SpkP Out", NULL, "Speaker Mixer"},
	{"SPKOUTN", NULL, "SpkN Out"},
	{"SPKOUTP", NULL, "SpkP Out"},

	/* Microphone PGA */
	{"Mic PGA", "MICN Switch", "MICN"},
	{"Mic PGA", "MICP Switch", "MICP"},
	{"Mic PGA", "AUX Switch", "AUX"},

	/* Boost Mixer */
	{"Boost Mixer", "Mic PGA Switch", "Mic PGA"},
	{"Boost Mixer", "Mic Volume", "MICP"},
	{"Boost Mixer", "Aux Volume", "Aux Input"},

	{"ADC", NULL, "Boost Mixer"},
};
333
334static int wm8940_add_widgets(struct snd_soc_codec *codec)
335{
336 int ret;
337
338 ret = snd_soc_dapm_new_controls(codec, wm8940_dapm_widgets,
339 ARRAY_SIZE(wm8940_dapm_widgets));
340 if (ret)
341 goto error_ret;
342 ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
343 if (ret)
344 goto error_ret;
345 ret = snd_soc_dapm_new_widgets(codec);
346
347error_ret:
348 return ret;
349}
350
351#define wm8940_reset(c) wm8940_write(c, WM8940_SOFTRESET, 0);
352
/*
 * Configure the digital audio interface format from the machine
 * driver's daifmt flags: master/slave role, data format and BCLK/frame
 * clock inversion.  Returns 0, or -EINVAL for unsupported master/slave
 * combinations.
 */
static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	/* 0xFE67 keeps everything except format (bits 4:3) and the
	 * inversion bits (7 and 8), which are rewritten below. */
	u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFE67;
	/* 0x1fe clears bit 0, the master/slave select. */
	u16 clk = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0x1fe;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		clk |= 1;	/* codec generates BCLK and frame clock */
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;		/* codec is clock/frame slave (bit stays 0) */
	default:
		return -EINVAL;
	}
	wm8940_write(codec, WM8940_CLOCK, clk);

	/* Data format select, IFACE bits 4:3.
	 * NOTE(review): unrecognised format values fall through silently,
	 * leaving right-justified mode selected — confirm intended. */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= (2 << 3);
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= (1 << 3);
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= (3 << 3);
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* DSP mode B additionally inverts the frame clock */
		iface |= (3 << 3) | (1 << 7);
		break;
	}

	/* Clock inversion: bit 7 = frame clock, bit 8 = BCLK. */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= (1 << 7);
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= (1 << 8);
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= (1 << 8) | (1 << 7);
		break;
	}

	wm8940_write(codec, WM8940_IFACE, iface);

	return 0;
}
406
407static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
408 struct snd_pcm_hw_params *params,
409 struct snd_soc_dai *dai)
410{
411 struct snd_soc_pcm_runtime *rtd = substream->private_data;
412 struct snd_soc_device *socdev = rtd->socdev;
413 struct snd_soc_codec *codec = socdev->card->codec;
414 u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFD9F;
415 u16 addcntrl = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFF1;
416 u16 companding = wm8940_read_reg_cache(codec,
417 WM8940_COMPANDINGCTL) & 0xFFDF;
418 int ret;
419
420 /* LoutR control */
421 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE
422 && params_channels(params) == 2)
423 iface |= (1 << 9);
424
425 switch (params_rate(params)) {
426 case SNDRV_PCM_RATE_8000:
427 addcntrl |= (0x5 << 1);
428 break;
429 case SNDRV_PCM_RATE_11025:
430 addcntrl |= (0x4 << 1);
431 break;
432 case SNDRV_PCM_RATE_16000:
433 addcntrl |= (0x3 << 1);
434 break;
435 case SNDRV_PCM_RATE_22050:
436 addcntrl |= (0x2 << 1);
437 break;
438 case SNDRV_PCM_RATE_32000:
439 addcntrl |= (0x1 << 1);
440 break;
441 case SNDRV_PCM_RATE_44100:
442 case SNDRV_PCM_RATE_48000:
443 break;
444 }
445 ret = wm8940_write(codec, WM8940_ADDCNTRL, addcntrl);
446 if (ret)
447 goto error_ret;
448
449 switch (params_format(params)) {
450 case SNDRV_PCM_FORMAT_S8:
451 companding = companding | (1 << 5);
452 break;
453 case SNDRV_PCM_FORMAT_S16_LE:
454 break;
455 case SNDRV_PCM_FORMAT_S20_3LE:
456 iface |= (1 << 5);
457 break;
458 case SNDRV_PCM_FORMAT_S24_LE:
459 iface |= (2 << 5);
460 break;
461 case SNDRV_PCM_FORMAT_S32_LE:
462 iface |= (3 << 5);
463 break;
464 }
465 ret = wm8940_write(codec, WM8940_COMPANDINGCTL, companding);
466 if (ret)
467 goto error_ret;
468 ret = wm8940_write(codec, WM8940_IFACE, iface);
469
470error_ret:
471 return ret;
472}
473
474static int wm8940_mute(struct snd_soc_dai *dai, int mute)
475{
476 struct snd_soc_codec *codec = dai->codec;
477 u16 mute_reg = wm8940_read_reg_cache(codec, WM8940_DAC) & 0xffbf;
478
479 if (mute)
480 mute_reg |= 0x40;
481
482 return wm8940_write(codec, WM8940_DAC, mute_reg);
483}
484
/*
 * Move the codec between ASoC bias (power) levels by programming
 * POWER1: VMID impedance select (bits 1:0), bias enable (bit 3) and
 * I/O buffer enable (bit 2).  BIAS_ON additionally enables thermal
 * shutdown in the output control register.
 * Returns 0 or the first register-write error.
 */
static int wm8940_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	u16 val;
	/* 0x1F0 preserves the upper power bits, clears VMID/bias/buffers */
	u16 pwr_reg = wm8940_read_reg_cache(codec, WM8940_POWER1) & 0x1F0;
	int ret = 0;

	switch (level) {
	case SND_SOC_BIAS_ON:
		/* ensure bufioen and biasen */
		pwr_reg |= (1 << 2) | (1 << 3);
		/* Enable thermal shutdown */
		val = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL);
		ret = wm8940_write(codec, WM8940_OUTPUTCTL, val | 0x2);
		if (ret)
			break;
		/* set vmid to 75k */
		ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1);
		break;
	case SND_SOC_BIAS_PREPARE:
		/* ensure bufioen and biasen */
		pwr_reg |= (1 << 2) | (1 << 3);
		ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1);
		break;
	case SND_SOC_BIAS_STANDBY:
		/* ensure bufioen and biasen */
		pwr_reg |= (1 << 2) | (1 << 3);
		/* set vmid to 300k for standby */
		ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x2);
		break;
	case SND_SOC_BIAS_OFF:
		/* VMID off, bias and buffers disabled */
		ret = wm8940_write(codec, WM8940_POWER1, pwr_reg);
		break;
	}

	return ret;
}
522
/* PLL divider set computed by pll_factors() and programmed into the
 * PLLN/PLLK1..3 registers by wm8940_set_dai_pll(). */
struct pll_ {
	unsigned int pre_scale:2;	/* input prescale select, 0..3 */
	unsigned int n:4;		/* integer multiplier; 6..12 recommended */
	unsigned int k;			/* 24-bit fractional multiplier */
};

/* Most recently computed divider set (single global instance). */
static struct pll_ pll_div;
530
531/* The size in bits of the pll divide multiplied by 10
532 * to allow rounding later */
533#define FIXED_PLL_SIZE ((1 << 24) * 10)
534static void pll_factors(unsigned int target, unsigned int source)
535{
536 unsigned long long Kpart;
537 unsigned int K, Ndiv, Nmod;
538 /* The left shift ist to avoid accuracy loss when right shifting */
539 Ndiv = target / source;
540
541 if (Ndiv > 12) {
542 source <<= 1;
543 /* Multiply by 2 */
544 pll_div.pre_scale = 0;
545 Ndiv = target / source;
546 } else if (Ndiv < 3) {
547 source >>= 2;
548 /* Divide by 4 */
549 pll_div.pre_scale = 3;
550 Ndiv = target / source;
551 } else if (Ndiv < 6) {
552 source >>= 1;
553 /* divide by 2 */
554 pll_div.pre_scale = 2;
555 Ndiv = target / source;
556 } else
557 pll_div.pre_scale = 1;
558
559 if ((Ndiv < 6) || (Ndiv > 12))
560 printk(KERN_WARNING
561 "WM8940 N value %d outwith recommended range!d\n",
562 Ndiv);
563
564 pll_div.n = Ndiv;
565 Nmod = target % source;
566 Kpart = FIXED_PLL_SIZE * (long long)Nmod;
567
568 do_div(Kpart, source);
569
570 K = Kpart & 0xFFFFFFFF;
571
572 /* Check if we need to round */
573 if ((K % 10) >= 5)
574 K += 5;
575
576 /* Move down to proper range now rounding is done */
577 K /= 10;
578
579 pll_div.k = K;
580}
581
/* Untested at the moment */
/*
 * Configure the on-chip PLL.  Passing freq_in or freq_out of 0 powers
 * the PLL down and clocks the codec directly from MCLK.  The PLL output
 * is internally divided by 4, hence the freq_out * 4 target below.
 * Always returns 0.
 */
static int wm8940_set_dai_pll(struct snd_soc_dai *codec_dai,
		int pll_id, unsigned int freq_in, unsigned int freq_out)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 reg;

	/* Turn off PLL (POWER1 bit 5) before reprogramming it */
	reg = wm8940_read_reg_cache(codec, WM8940_POWER1);
	wm8940_write(codec, WM8940_POWER1, reg & 0x1df);

	if (freq_in == 0 || freq_out == 0) {
		/* Clock CODEC directly from MCLK (clear CLKSEL, bit 8) */
		reg = wm8940_read_reg_cache(codec, WM8940_CLOCK);
		wm8940_write(codec, WM8940_CLOCK, reg & 0x0ff);
		/* Pll power down */
		wm8940_write(codec, WM8940_PLLN, (1 << 7));
		return 0;
	}

	/* Pll is followed by a frequency divide by 4 */
	pll_factors(freq_out*4, freq_in);
	if (pll_div.k)
		/* bit 6 enables the fractional (K) part */
		wm8940_write(codec, WM8940_PLLN,
			     (pll_div.pre_scale << 4) | pll_div.n | (1 << 6));
	else /* No fractional component */
		wm8940_write(codec, WM8940_PLLN,
			     (pll_div.pre_scale << 4) | pll_div.n);
	/* The 24-bit K value is split across three 9-bit registers */
	wm8940_write(codec, WM8940_PLLK1, pll_div.k >> 18);
	wm8940_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff);
	wm8940_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff);
	/* Enable the PLL */
	reg = wm8940_read_reg_cache(codec, WM8940_POWER1);
	wm8940_write(codec, WM8940_POWER1, reg | 0x020);

	/* Run CODEC from PLL instead of MCLK (CLKSEL, bit 8) */
	reg = wm8940_read_reg_cache(codec, WM8940_CLOCK);
	wm8940_write(codec, WM8940_CLOCK, reg | 0x100);

	return 0;
}
623
624static int wm8940_set_dai_sysclk(struct snd_soc_dai *codec_dai,
625 int clk_id, unsigned int freq, int dir)
626{
627 struct snd_soc_codec *codec = codec_dai->codec;
628 struct wm8940_priv *wm8940 = codec->private_data;
629
630 switch (freq) {
631 case 11289600:
632 case 12000000:
633 case 12288000:
634 case 16934400:
635 case 18432000:
636 wm8940->sysclk = freq;
637 return 0;
638 }
639 return -EINVAL;
640}
641
642static int wm8940_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
643 int div_id, int div)
644{
645 struct snd_soc_codec *codec = codec_dai->codec;
646 u16 reg;
647 int ret = 0;
648
649 switch (div_id) {
650 case WM8940_BCLKDIV:
651 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFFEF3;
652 ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 2));
653 break;
654 case WM8940_MCLKDIV:
655 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFF1F;
656 ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 5));
657 break;
658 case WM8940_OPCLKDIV:
659 reg = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFCF;
660 ret = wm8940_write(codec, WM8940_ADDCNTRL, reg | (div << 4));
661 break;
662 }
663 return ret;
664}
665
/* Standard 8k..48k rates; see wm8940_i2s_hw_params() for the mapping. */
#define WM8940_RATES SNDRV_PCM_RATE_8000_48000

/* Sample formats handled in wm8940_i2s_hw_params(). */
#define WM8940_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
			SNDRV_PCM_FMTBIT_S16_LE | \
			SNDRV_PCM_FMTBIT_S20_3LE | \
			SNDRV_PCM_FMTBIT_S24_LE | \
			SNDRV_PCM_FMTBIT_S32_LE)

/* DAI operations implemented above. */
static struct snd_soc_dai_ops wm8940_dai_ops = {
	.hw_params = wm8940_i2s_hw_params,
	.set_sysclk = wm8940_set_dai_sysclk,
	.digital_mute = wm8940_mute,
	.set_fmt = wm8940_set_dai_fmt,
	.set_clkdiv = wm8940_set_dai_clkdiv,
	.set_pll = wm8940_set_dai_pll,
};

/* Single bidirectional DAI; symmetric_rates constrains playback and
 * capture to run at the same sample rate. */
struct snd_soc_dai wm8940_dai = {
	.name = "WM8940",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8940_RATES,
		.formats = WM8940_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8940_RATES,
		.formats = WM8940_FORMATS,
	},
	.ops = &wm8940_dai_ops,
	.symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(wm8940_dai);
703
704static int wm8940_suspend(struct platform_device *pdev, pm_message_t state)
705{
706 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
707 struct snd_soc_codec *codec = socdev->card->codec;
708
709 return wm8940_set_bias_level(codec, SND_SOC_BIAS_OFF);
710}
711
712static int wm8940_resume(struct platform_device *pdev)
713{
714 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
715 struct snd_soc_codec *codec = socdev->card->codec;
716 int i;
717 int ret;
718 u8 data[3];
719 u16 *cache = codec->reg_cache;
720
721 /* Sync reg_cache with the hardware
722 * Could use auto incremented writes to speed this up
723 */
724 for (i = 0; i < ARRAY_SIZE(wm8940_reg_defaults); i++) {
725 data[0] = i;
726 data[1] = (cache[i] & 0xFF00) >> 8;
727 data[2] = cache[i] & 0x00FF;
728 ret = codec->hw_write(codec->control_data, data, 3);
729 if (ret < 0)
730 goto error_ret;
731 else if (ret != 3) {
732 ret = -EIO;
733 goto error_ret;
734 }
735 }
736 ret = wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
737 if (ret)
738 goto error_ret;
739 ret = wm8940_set_bias_level(codec, codec->suspend_bias_level);
740
741error_ret:
742 return ret;
743}
744
745static struct snd_soc_codec *wm8940_codec;
746
/*
 * Platform-device probe: bind the codec instance created by the I2C
 * probe (wm8940_codec) to the ASoC card, create PCMs, register mixer
 * controls and the DAPM graph, then initialise the card.
 * Returns 0 or a negative error code (PCMs/DAPM freed on failure).
 */
static int wm8940_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec;

	int ret = 0;

	/* The I2C-side wm8940_register() must have run first */
	if (wm8940_codec == NULL) {
		dev_err(&pdev->dev, "Codec device not registered\n");
		return -ENODEV;
	}

	socdev->card->codec = wm8940_codec;
	codec = wm8940_codec;

	mutex_init(&codec->mutex);
	/* register pcms */
	ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
	if (ret < 0) {
		dev_err(codec->dev, "failed to create pcms: %d\n", ret);
		goto pcm_err;
	}

	ret = snd_soc_add_controls(codec, wm8940_snd_controls,
				   ARRAY_SIZE(wm8940_snd_controls));
	if (ret)
		goto error_free_pcms;
	ret = wm8940_add_widgets(codec);
	if (ret)
		goto error_free_pcms;

	ret = snd_soc_init_card(socdev);
	if (ret < 0) {
		dev_err(codec->dev, "failed to register card: %d\n", ret);
		goto error_free_pcms;
	}

	return ret;

error_free_pcms:
	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
pcm_err:
	return ret;
}
792
/* Platform-device remove: release PCMs and DAPM resources. */
static int wm8940_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);

	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
	return 0;
}
802
/* ASoC codec-device operations, referenced by machine drivers. */
struct snd_soc_codec_device soc_codec_dev_wm8940 = {
	.probe = wm8940_probe,
	.remove = wm8940_remove,
	.suspend = wm8940_suspend,
	.resume = wm8940_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8940);
810
811static int wm8940_register(struct wm8940_priv *wm8940)
812{
813 struct wm8940_setup_data *pdata = wm8940->codec.dev->platform_data;
814 struct snd_soc_codec *codec = &wm8940->codec;
815 int ret;
816 u16 reg;
817 if (wm8940_codec) {
818 dev_err(codec->dev, "Another WM8940 is registered\n");
819 return -EINVAL;
820 }
821
822 INIT_LIST_HEAD(&codec->dapm_widgets);
823 INIT_LIST_HEAD(&codec->dapm_paths);
824
825 codec->private_data = wm8940;
826 codec->name = "WM8940";
827 codec->owner = THIS_MODULE;
828 codec->read = wm8940_read_reg_cache;
829 codec->write = wm8940_write;
830 codec->bias_level = SND_SOC_BIAS_OFF;
831 codec->set_bias_level = wm8940_set_bias_level;
832 codec->dai = &wm8940_dai;
833 codec->num_dai = 1;
834 codec->reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults);
835 codec->reg_cache = &wm8940->reg_cache;
836
837 memcpy(codec->reg_cache, wm8940_reg_defaults,
838 sizeof(wm8940_reg_defaults));
839
840 ret = wm8940_reset(codec);
841 if (ret < 0) {
842 dev_err(codec->dev, "Failed to issue reset\n");
843 return ret;
844 }
845
846 wm8940_dai.dev = codec->dev;
847
848 wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
849
850 ret = wm8940_write(codec, WM8940_POWER1, 0x180);
851 if (ret < 0)
852 return ret;
853
854 if (!pdata)
855 dev_warn(codec->dev, "No platform data supplied\n");
856 else {
857 reg = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL);
858 ret = wm8940_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi);
859 if (ret < 0)
860 return ret;
861 }
862
863
864 wm8940_codec = codec;
865
866 ret = snd_soc_register_codec(codec);
867 if (ret) {
868 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
869 return ret;
870 }
871
872 ret = snd_soc_register_dai(&wm8940_dai);
873 if (ret) {
874 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
875 snd_soc_unregister_codec(codec);
876 return ret;
877 }
878
879 return 0;
880}
881
/*
 * Mirror of wm8940_register(): power off, unregister the DAI and the
 * codec, free the private data and clear the singleton pointer so a
 * new instance may be registered later.
 */
static void wm8940_unregister(struct wm8940_priv *wm8940)
{
	wm8940_set_bias_level(&wm8940->codec, SND_SOC_BIAS_OFF);
	snd_soc_unregister_dai(&wm8940_dai);
	snd_soc_unregister_codec(&wm8940->codec);
	kfree(wm8940);
	wm8940_codec = NULL;
}
890
/*
 * I2C probe: allocate the private structure, wire up I2C register
 * writes and hand off to wm8940_register().
 * NOTE(review): if wm8940_register() fails, this allocation is leaked
 * (ownership only transfers to wm8940_unregister() on success) —
 * confirm and free on the error path.
 */
static int wm8940_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8940_priv *wm8940;
	struct snd_soc_codec *codec;

	wm8940 = kzalloc(sizeof *wm8940, GFP_KERNEL);
	if (wm8940 == NULL)
		return -ENOMEM;

	codec = &wm8940->codec;
	/* Registers are written over I2C; reads come from the cache */
	codec->hw_write = (hw_write_t)i2c_master_send;
	i2c_set_clientdata(i2c, wm8940);
	codec->control_data = i2c;
	codec->dev = &i2c->dev;

	return wm8940_register(wm8940);
}
909
910static int __devexit wm8940_i2c_remove(struct i2c_client *client)
911{
912 struct wm8940_priv *wm8940 = i2c_get_clientdata(client);
913
914 wm8940_unregister(wm8940);
915
916 return 0;
917}
918
/* I2C ids matched by this driver (board code instantiates "wm8940"). */
static const struct i2c_device_id wm8940_i2c_id[] = {
	{ "wm8940", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8940_i2c_id);

static struct i2c_driver wm8940_i2c_driver = {
	.driver = {
		.name = "WM8940 I2C Codec",
		.owner = THIS_MODULE,
	},
	.probe = wm8940_i2c_probe,
	.remove = __devexit_p(wm8940_i2c_remove),
	.id_table = wm8940_i2c_id,
};
934
935static int __init wm8940_modinit(void)
936{
937 int ret;
938
939 ret = i2c_add_driver(&wm8940_i2c_driver);
940 if (ret)
941 printk(KERN_ERR "Failed to register WM8940 I2C driver: %d\n",
942 ret);
943 return ret;
944}
945module_init(wm8940_modinit);
946
/* Module unload: remove the I2C driver added in wm8940_modinit(). */
static void __exit wm8940_exit(void)
{
	i2c_del_driver(&wm8940_i2c_driver);
}
module_exit(wm8940_exit);
952
953MODULE_DESCRIPTION("ASoC WM8940 driver");
954MODULE_AUTHOR("Jonathan Cameron");
955MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8940.h b/sound/soc/codecs/wm8940.h
new file mode 100644
index 000000000000..8410eed3ef84
--- /dev/null
+++ b/sound/soc/codecs/wm8940.h
@@ -0,0 +1,104 @@
1/*
2 * wm8940.h -- WM8940 Soc Audio driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _WM8940_H
10#define _WM8940_H
11
/* Board-specific configuration, passed via dev->platform_data. */
struct wm8940_setup_data {
	/* Vref to analogue output resistance */
#define WM8940_VROI_1K 0
#define WM8940_VROI_30K 1
	unsigned int vroi:1;	/* OR'd into WM8940_OUTPUTCTL at registration */
};
extern struct snd_soc_dai wm8940_dai;
extern struct snd_soc_codec_device soc_codec_dev_wm8940;
20
21/* WM8940 register space */
22#define WM8940_SOFTRESET 0x00
23#define WM8940_POWER1 0x01
24#define WM8940_POWER2 0x02
25#define WM8940_POWER3 0x03
26#define WM8940_IFACE 0x04
27#define WM8940_COMPANDINGCTL 0x05
28#define WM8940_CLOCK 0x06
29#define WM8940_ADDCNTRL 0x07
30#define WM8940_GPIO 0x08
31#define WM8940_CTLINT 0x09
32#define WM8940_DAC 0x0A
33#define WM8940_DACVOL 0x0B
34
35#define WM8940_ADC 0x0E
36#define WM8940_ADCVOL 0x0F
37#define WM8940_NOTCH1 0x10
38#define WM8940_NOTCH2 0x11
39#define WM8940_NOTCH3 0x12
40#define WM8940_NOTCH4 0x13
41#define WM8940_NOTCH5 0x14
42#define WM8940_NOTCH6 0x15
43#define WM8940_NOTCH7 0x16
44#define WM8940_NOTCH8 0x17
45#define WM8940_DACLIM1 0x18
46#define WM8940_DACLIM2 0x19
47
48#define WM8940_ALC1 0x20
49#define WM8940_ALC2 0x21
50#define WM8940_ALC3 0x22
51#define WM8940_NOISEGATE 0x23
52#define WM8940_PLLN 0x24
53#define WM8940_PLLK1 0x25
54#define WM8940_PLLK2 0x26
55#define WM8940_PLLK3 0x27
56
57#define WM8940_ALC4 0x2A
58
59#define WM8940_INPUTCTL 0x2C
60#define WM8940_PGAGAIN 0x2D
61
62#define WM8940_ADCBOOST 0x2F
63
64#define WM8940_OUTPUTCTL 0x31
65#define WM8940_SPKMIX 0x32
66
67#define WM8940_SPKVOL 0x36
68
69#define WM8940_MONOMIX 0x38
70
71#define WM8940_CACHEREGNUM 0x57
72
73
74/* Clock divider Id's */
75#define WM8940_BCLKDIV 0
76#define WM8940_MCLKDIV 1
77#define WM8940_OPCLKDIV 2
78
79/* MCLK clock dividers */
80#define WM8940_MCLKDIV_1 0
81#define WM8940_MCLKDIV_1_5 1
82#define WM8940_MCLKDIV_2 2
83#define WM8940_MCLKDIV_3 3
84#define WM8940_MCLKDIV_4 4
85#define WM8940_MCLKDIV_6 5
86#define WM8940_MCLKDIV_8 6
87#define WM8940_MCLKDIV_12 7
88
89/* BCLK clock dividers */
90#define WM8940_BCLKDIV_1 0
91#define WM8940_BCLKDIV_2 1
92#define WM8940_BCLKDIV_4 2
93#define WM8940_BCLKDIV_8 3
94#define WM8940_BCLKDIV_16 4
95#define WM8940_BCLKDIV_32 5
96
97/* PLL Out Dividers */
98#define WM8940_OPCLKDIV_1 0
99#define WM8940_OPCLKDIV_2 1
100#define WM8940_OPCLKDIV_3 2
101#define WM8940_OPCLKDIV_4 3
102
103#endif /* _WM8940_H */
104
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
new file mode 100644
index 000000000000..e224d8add170
--- /dev/null
+++ b/sound/soc/codecs/wm8960.c
@@ -0,0 +1,969 @@
1/*
2 * wm8960.c -- WM8960 ALSA SoC Audio driver
3 *
4 * Author: Liam Girdwood
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/pm.h>
16#include <linux/i2c.h>
17#include <linux/platform_device.h>
18#include <sound/core.h>
19#include <sound/pcm.h>
20#include <sound/pcm_params.h>
21#include <sound/soc.h>
22#include <sound/soc-dapm.h>
23#include <sound/initval.h>
24#include <sound/tlv.h>
25
26#include "wm8960.h"
27
28#define AUDIO_NAME "wm8960"
29
30struct snd_soc_codec_device soc_codec_dev_wm8960;
31
32/* R25 - Power 1 */
33#define WM8960_VREF 0x40
34
35/* R28 - Anti-pop 1 */
36#define WM8960_POBCTRL 0x80
37#define WM8960_BUFDCOPEN 0x10
38#define WM8960_BUFIOEN 0x08
39#define WM8960_SOFT_ST 0x04
40#define WM8960_HPSTBY 0x01
41
42/* R29 - Anti-pop 2 */
43#define WM8960_DISOP 0x40
44
/*
 * wm8960 register cache
 * We can't read the WM8960 register space when we are
 * using 2 wire for device control, so we cache them instead.
 */
static const u16 wm8960_reg[WM8960_CACHEREGNUM] = {
	0x0097, 0x0097, 0x0000, 0x0000,
	0x0000, 0x0008, 0x0000, 0x000a,
	0x01c0, 0x0000, 0x00ff, 0x00ff,
	0x0000, 0x0000, 0x0000, 0x0000,
	0x0000, 0x007b, 0x0100, 0x0032,
	0x0000, 0x00c3, 0x00c3, 0x01c0,
	0x0000, 0x0000, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0000,
	0x0100, 0x0100, 0x0050, 0x0050,
	0x0050, 0x0050, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0040, 0x0000,
	0x0000, 0x0050, 0x0050, 0x0000,
	0x0002, 0x0037, 0x004d, 0x0080,
	0x0008, 0x0031, 0x0026, 0x00e9,
};

/* Per-device private state embedding the ASoC codec structure. */
struct wm8960_priv {
	u16 reg_cache[WM8960_CACHEREGNUM];	/* shadow of chip registers */
	struct snd_soc_codec codec;
};
71
72/*
73 * read wm8960 register cache
74 */
75static inline unsigned int wm8960_read_reg_cache(struct snd_soc_codec *codec,
76 unsigned int reg)
77{
78 u16 *cache = codec->reg_cache;
79 if (reg == WM8960_RESET)
80 return 0;
81 if (reg >= WM8960_CACHEREGNUM)
82 return -1;
83 return cache[reg];
84}
85
86/*
87 * write wm8960 register cache
88 */
89static inline void wm8960_write_reg_cache(struct snd_soc_codec *codec,
90 u16 reg, unsigned int value)
91{
92 u16 *cache = codec->reg_cache;
93 if (reg >= WM8960_CACHEREGNUM)
94 return;
95 cache[reg] = value;
96}
97
/* Register "reads" are served entirely from the cache (write-only bus). */
static inline unsigned int wm8960_read(struct snd_soc_codec *codec,
	unsigned int reg)
{
	return wm8960_read_reg_cache(codec, reg);
}
103
104/*
105 * write to the WM8960 register space
106 */
107static int wm8960_write(struct snd_soc_codec *codec, unsigned int reg,
108 unsigned int value)
109{
110 u8 data[2];
111
112 /* data is
113 * D15..D9 WM8960 register offset
114 * D8...D0 register data
115 */
116 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
117 data[1] = value & 0x00ff;
118
119 wm8960_write_reg_cache(codec, reg, value);
120 if (codec->hw_write(codec->control_data, data, 2) == 2)
121 return 0;
122 else
123 return -EIO;
124}
125
/* Writing 0 to the reset register restores hardware defaults. */
#define wm8960_reset(c)	wm8960_write(c, WM8960_RESET, 0)

/* enumerated controls */
static const char *wm8960_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
static const char *wm8960_polarity[] = {"No Inversion", "Left Inverted",
	"Right Inverted", "Stereo Inversion"};
static const char *wm8960_3d_upper_cutoff[] = {"High", "Low"};
static const char *wm8960_3d_lower_cutoff[] = {"Low", "High"};
static const char *wm8960_alcfunc[] = {"Off", "Right", "Left", "Stereo"};
static const char *wm8960_alcmode[] = {"ALC", "Limiter"};

/* Enum control descriptors; indexed by position in the controls below. */
static const struct soc_enum wm8960_enum[] = {
	SOC_ENUM_SINGLE(WM8960_DACCTL1, 1, 4, wm8960_deemph),
	SOC_ENUM_SINGLE(WM8960_DACCTL1, 5, 4, wm8960_polarity),
	SOC_ENUM_SINGLE(WM8960_DACCTL2, 5, 4, wm8960_polarity),
	SOC_ENUM_SINGLE(WM8960_3D, 6, 2, wm8960_3d_upper_cutoff),
	SOC_ENUM_SINGLE(WM8960_3D, 5, 2, wm8960_3d_lower_cutoff),
	SOC_ENUM_SINGLE(WM8960_ALC1, 7, 4, wm8960_alcfunc),
	SOC_ENUM_SINGLE(WM8960_ALC3, 8, 2, wm8960_alcmode),
};

/* Volume scales (min in 0.01dB, step in 0.01dB, mute-at-min flag). */
static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);

/* Mixer/volume controls exposed to userspace via ALSA. */
static const struct snd_kcontrol_new wm8960_snd_controls[] = {
SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
		 0, 63, 0, adc_tlv),
SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
	6, 1, 0),
SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
	7, 1, 0),

SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC,
		 0, 255, 0, dac_tlv),

SOC_DOUBLE_R_TLV("Headphone Playback Volume", WM8960_LOUT1, WM8960_ROUT1,
		 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8960_LOUT1, WM8960_ROUT1,
	7, 1, 0),

SOC_DOUBLE_R_TLV("Speaker Playback Volume", WM8960_LOUT2, WM8960_ROUT2,
		 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8960_LOUT2, WM8960_ROUT2,
	7, 1, 0),
SOC_SINGLE("Speaker DC Volume", WM8960_CLASSD3, 3, 5, 0),
SOC_SINGLE("Speaker AC Volume", WM8960_CLASSD3, 0, 5, 0),

SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
SOC_ENUM("ADC Polarity", wm8960_enum[1]),
SOC_ENUM("Playback De-emphasis", wm8960_enum[0]),
SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),

SOC_ENUM("DAC Polarity", wm8960_enum[2]),

SOC_ENUM("3D Filter Upper Cut-Off", wm8960_enum[3]),
SOC_ENUM("3D Filter Lower Cut-Off", wm8960_enum[4]),
SOC_SINGLE("3D Volume", WM8960_3D, 1, 15, 0),
SOC_SINGLE("3D Switch", WM8960_3D, 0, 1, 0),

SOC_ENUM("ALC Function", wm8960_enum[5]),
SOC_SINGLE("ALC Max Gain", WM8960_ALC1, 4, 7, 0),
SOC_SINGLE("ALC Target", WM8960_ALC1, 0, 15, 1),
SOC_SINGLE("ALC Min Gain", WM8960_ALC2, 4, 7, 0),
SOC_SINGLE("ALC Hold Time", WM8960_ALC2, 0, 15, 0),
SOC_ENUM("ALC Mode", wm8960_enum[6]),
SOC_SINGLE("ALC Decay", WM8960_ALC3, 4, 15, 0),
SOC_SINGLE("ALC Attack", WM8960_ALC3, 0, 15, 0),

SOC_SINGLE("Noise Gate Threshold", WM8960_NOISEG, 3, 31, 0),
SOC_SINGLE("Noise Gate Switch", WM8960_NOISEG, 0, 1, 0),

SOC_DOUBLE_R("ADC PCM Capture Volume", WM8960_LINPATH, WM8960_RINPATH,
	0, 127, 0),

SOC_SINGLE_TLV("Left Output Mixer Boost Bypass Volume",
	       WM8960_BYPASS1, 4, 7, 1, bypass_tlv),
SOC_SINGLE_TLV("Left Output Mixer LINPUT3 Volume",
	       WM8960_LOUTMIX, 4, 7, 1, bypass_tlv),
SOC_SINGLE_TLV("Right Output Mixer Boost Bypass Volume",
	       WM8960_BYPASS2, 4, 7, 1, bypass_tlv),
SOC_SINGLE_TLV("Right Output Mixer RINPUT3 Volume",
	       WM8960_ROUTMIX, 4, 7, 1, bypass_tlv),
};
211
/* Input-select switches for the left input boost stage. */
static const struct snd_kcontrol_new wm8960_lin_boost[] = {
SOC_DAPM_SINGLE("LINPUT2 Switch", WM8960_LINPATH, 6, 1, 0),
SOC_DAPM_SINGLE("LINPUT3 Switch", WM8960_LINPATH, 7, 1, 0),
SOC_DAPM_SINGLE("LINPUT1 Switch", WM8960_LINPATH, 8, 1, 0),
};

/* Boost enable for the left input mixer. */
static const struct snd_kcontrol_new wm8960_lin[] = {
SOC_DAPM_SINGLE("Boost Switch", WM8960_LINPATH, 3, 1, 0),
};

/* Input-select switches for the right input boost stage. */
static const struct snd_kcontrol_new wm8960_rin_boost[] = {
SOC_DAPM_SINGLE("RINPUT2 Switch", WM8960_RINPATH, 6, 1, 0),
SOC_DAPM_SINGLE("RINPUT3 Switch", WM8960_RINPATH, 7, 1, 0),
SOC_DAPM_SINGLE("RINPUT1 Switch", WM8960_RINPATH, 8, 1, 0),
};

/* Boost enable for the right input mixer. */
static const struct snd_kcontrol_new wm8960_rin[] = {
SOC_DAPM_SINGLE("Boost Switch", WM8960_RINPATH, 3, 1, 0),
};

/* Source switches for the left output mixer. */
static const struct snd_kcontrol_new wm8960_loutput_mixer[] = {
SOC_DAPM_SINGLE("PCM Playback Switch", WM8960_LOUTMIX, 8, 1, 0),
SOC_DAPM_SINGLE("LINPUT3 Switch", WM8960_LOUTMIX, 7, 1, 0),
SOC_DAPM_SINGLE("Boost Bypass Switch", WM8960_BYPASS1, 7, 1, 0),
};

/* Source switches for the right output mixer. */
static const struct snd_kcontrol_new wm8960_routput_mixer[] = {
SOC_DAPM_SINGLE("PCM Playback Switch", WM8960_ROUTMIX, 8, 1, 0),
SOC_DAPM_SINGLE("RINPUT3 Switch", WM8960_ROUTMIX, 7, 1, 0),
SOC_DAPM_SINGLE("Boost Bypass Switch", WM8960_BYPASS2, 7, 1, 0),
};

/* Left/right contribution switches for the mono output mixer. */
static const struct snd_kcontrol_new wm8960_mono_out[] = {
SOC_DAPM_SINGLE("Left Switch", WM8960_MONOMIX1, 7, 1, 0),
SOC_DAPM_SINGLE("Right Switch", WM8960_MONOMIX2, 7, 1, 0),
};

/* DAPM widget graph; power bits live in WM8960_POWER1..POWER3 and the
 * class-D control register. */
static const struct snd_soc_dapm_widget wm8960_dapm_widgets[] = {
/* Physical input pins */
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("RINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
SND_SOC_DAPM_INPUT("RINPUT2"),
SND_SOC_DAPM_INPUT("LINPUT3"),
SND_SOC_DAPM_INPUT("RINPUT3"),

SND_SOC_DAPM_MICBIAS("MICB", WM8960_POWER1, 1, 0),

/* Capture path: inputs -> boost mixers -> input mixers -> ADCs */
SND_SOC_DAPM_MIXER("Left Boost Mixer", WM8960_POWER1, 5, 0,
		   wm8960_lin_boost, ARRAY_SIZE(wm8960_lin_boost)),
SND_SOC_DAPM_MIXER("Right Boost Mixer", WM8960_POWER1, 4, 0,
		   wm8960_rin_boost, ARRAY_SIZE(wm8960_rin_boost)),

SND_SOC_DAPM_MIXER("Left Input Mixer", WM8960_POWER3, 5, 0,
		   wm8960_lin, ARRAY_SIZE(wm8960_lin)),
SND_SOC_DAPM_MIXER("Right Input Mixer", WM8960_POWER3, 4, 0,
		   wm8960_rin, ARRAY_SIZE(wm8960_rin)),

SND_SOC_DAPM_ADC("Left ADC", "Capture", WM8960_POWER2, 3, 0),
SND_SOC_DAPM_ADC("Right ADC", "Capture", WM8960_POWER2, 2, 0),

/* Playback path: DACs -> output mixers -> headphone/speaker drivers */
SND_SOC_DAPM_DAC("Left DAC", "Playback", WM8960_POWER2, 8, 0),
SND_SOC_DAPM_DAC("Right DAC", "Playback", WM8960_POWER2, 7, 0),

SND_SOC_DAPM_MIXER("Left Output Mixer", WM8960_POWER3, 3, 0,
		   &wm8960_loutput_mixer[0],
		   ARRAY_SIZE(wm8960_loutput_mixer)),
SND_SOC_DAPM_MIXER("Right Output Mixer", WM8960_POWER3, 2, 0,
		   &wm8960_routput_mixer[0],
		   ARRAY_SIZE(wm8960_routput_mixer)),

SND_SOC_DAPM_MIXER("Mono Output Mixer", WM8960_POWER2, 1, 0,
		   &wm8960_mono_out[0],
		   ARRAY_SIZE(wm8960_mono_out)),

SND_SOC_DAPM_PGA("LOUT1 PGA", WM8960_POWER2, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("ROUT1 PGA", WM8960_POWER2, 5, 0, NULL, 0),

SND_SOC_DAPM_PGA("Left Speaker PGA", WM8960_POWER2, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Speaker PGA", WM8960_POWER2, 3, 0, NULL, 0),

SND_SOC_DAPM_PGA("Right Speaker Output", WM8960_CLASSD1, 7, 0, NULL, 0),
SND_SOC_DAPM_PGA("Left Speaker Output", WM8960_CLASSD1, 6, 0, NULL, 0),

/* Physical output pins */
SND_SOC_DAPM_OUTPUT("SPK_LP"),
SND_SOC_DAPM_OUTPUT("SPK_LN"),
SND_SOC_DAPM_OUTPUT("HP_L"),
SND_SOC_DAPM_OUTPUT("HP_R"),
SND_SOC_DAPM_OUTPUT("SPK_RP"),
SND_SOC_DAPM_OUTPUT("SPK_RN"),
SND_SOC_DAPM_OUTPUT("OUT3"),
};
303
304static const struct snd_soc_dapm_route audio_paths[] = {
305 { "Left Boost Mixer", "LINPUT1 Switch", "LINPUT1" },
306 { "Left Boost Mixer", "LINPUT2 Switch", "LINPUT2" },
307 { "Left Boost Mixer", "LINPUT3 Switch", "LINPUT3" },
308
309 { "Left Input Mixer", "Boost Switch", "Left Boost Mixer", },
310 { "Left Input Mixer", NULL, "LINPUT1", }, /* Really Boost Switch */
311 { "Left Input Mixer", NULL, "LINPUT2" },
312 { "Left Input Mixer", NULL, "LINPUT3" },
313
314 { "Right Boost Mixer", "RINPUT1 Switch", "RINPUT1" },
315 { "Right Boost Mixer", "RINPUT2 Switch", "RINPUT2" },
316 { "Right Boost Mixer", "RINPUT3 Switch", "RINPUT3" },
317
318 { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
319 { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
320 { "Right Input Mixer", NULL, "RINPUT2" },
321 { "Right Input Mixer", NULL, "LINPUT3" },
322
323 { "Left ADC", NULL, "Left Input Mixer" },
324 { "Right ADC", NULL, "Right Input Mixer" },
325
326 { "Left Output Mixer", "LINPUT3 Switch", "LINPUT3" },
327 { "Left Output Mixer", "Boost Bypass Switch", "Left Boost Mixer"} ,
328 { "Left Output Mixer", "PCM Playback Switch", "Left DAC" },
329
330 { "Right Output Mixer", "RINPUT3 Switch", "RINPUT3" },
331 { "Right Output Mixer", "Boost Bypass Switch", "Right Boost Mixer" } ,
332 { "Right Output Mixer", "PCM Playback Switch", "Right DAC" },
333
334 { "Mono Output Mixer", "Left Switch", "Left Output Mixer" },
335 { "Mono Output Mixer", "Right Switch", "Right Output Mixer" },
336
337 { "LOUT1 PGA", NULL, "Left Output Mixer" },
338 { "ROUT1 PGA", NULL, "Right Output Mixer" },
339
340 { "HP_L", NULL, "LOUT1 PGA" },
341 { "HP_R", NULL, "ROUT1 PGA" },
342
343 { "Left Speaker PGA", NULL, "Left Output Mixer" },
344 { "Right Speaker PGA", NULL, "Right Output Mixer" },
345
346 { "Left Speaker Output", NULL, "Left Speaker PGA" },
347 { "Right Speaker Output", NULL, "Right Speaker PGA" },
348
349 { "SPK_LN", NULL, "Left Speaker Output" },
350 { "SPK_LP", NULL, "Left Speaker Output" },
351 { "SPK_RN", NULL, "Right Speaker Output" },
352 { "SPK_RP", NULL, "Right Speaker Output" },
353
354 { "OUT3", NULL, "Mono Output Mixer", }
355};
356
/*
 * Register the DAPM widgets and routes defined above with the core and
 * instantiate them.  Always succeeds (the core calls log their own errors).
 */
static int wm8960_add_widgets(struct snd_soc_codec *codec)
{
	snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets,
				  ARRAY_SIZE(wm8960_dapm_widgets));

	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));

	snd_soc_dapm_new_widgets(codec);
	return 0;
}
367
/*
 * wm8960_set_dai_fmt - configure the digital audio interface format.
 * @codec_dai: DAI being configured
 * @fmt: SND_SOC_DAIFMT_* flags (master/slave, format, clock inversion)
 *
 * Builds the IFACE1 register value from the requested format and writes it
 * in one go.  Returns -EINVAL for any combination the part cannot do.
 * (Bit positions below are per the WM8960 datasheet — R7 interface control.)
 */
static int wm8960_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 iface = 0;

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		iface |= 0x0040;	/* codec is clock and frame master */
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;			/* codec is slave (register default) */
	default:
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x0002;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;			/* right justified is format 0 */
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x0001;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x0003;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= 0x0013;	/* DSP mode with LRCLK inverted */
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0x0090;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x0080;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x0010;
		break;
	default:
		return -EINVAL;
	}

	/* set iface */
	wm8960_write(codec, WM8960_IFACE1, iface);
	return 0;
}
426
427static int wm8960_hw_params(struct snd_pcm_substream *substream,
428 struct snd_pcm_hw_params *params,
429 struct snd_soc_dai *dai)
430{
431 struct snd_soc_pcm_runtime *rtd = substream->private_data;
432 struct snd_soc_device *socdev = rtd->socdev;
433 struct snd_soc_codec *codec = socdev->card->codec;
434 u16 iface = wm8960_read(codec, WM8960_IFACE1) & 0xfff3;
435
436 /* bit size */
437 switch (params_format(params)) {
438 case SNDRV_PCM_FORMAT_S16_LE:
439 break;
440 case SNDRV_PCM_FORMAT_S20_3LE:
441 iface |= 0x0004;
442 break;
443 case SNDRV_PCM_FORMAT_S24_LE:
444 iface |= 0x0008;
445 break;
446 }
447
448 /* set iface */
449 wm8960_write(codec, WM8960_IFACE1, iface);
450 return 0;
451}
452
453static int wm8960_mute(struct snd_soc_dai *dai, int mute)
454{
455 struct snd_soc_codec *codec = dai->codec;
456 u16 mute_reg = wm8960_read(codec, WM8960_DACCTL1) & 0xfff7;
457
458 if (mute)
459 wm8960_write(codec, WM8960_DACCTL1, mute_reg | 0x8);
460 else
461 wm8960_write(codec, WM8960_DACCTL1, mute_reg);
462 return 0;
463}
464
/*
 * wm8960_set_bias_level - DAPM bias management with anti-pop sequencing.
 *
 * The OFF->STANDBY path ramps VMID with the anti-pop circuitry enabled and
 * discharges the headphone outputs before enabling VREF; the sleep lengths
 * and the order of the register writes are part of that sequence and must
 * not be reordered.  pdata->dres (headphone discharge resistance) is taken
 * from platform data when available.
 */
static int wm8960_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8960_data *pdata = codec->dev->platform_data;
	u16 reg;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		/* Set VMID to 2x50k */
		reg = wm8960_read(codec, WM8960_POWER1);
		reg &= ~0x180;
		reg |= 0x80;
		wm8960_write(codec, WM8960_POWER1, reg);
		break;

	case SND_SOC_BIAS_STANDBY:
		/* Only run the full power-up sequence when coming from OFF */
		if (codec->bias_level == SND_SOC_BIAS_OFF) {
			/* Enable anti-pop features */
			wm8960_write(codec, WM8960_APOP1,
				     WM8960_POBCTRL | WM8960_SOFT_ST |
				     WM8960_BUFDCOPEN | WM8960_BUFIOEN);

			/* Discharge HP output */
			reg = WM8960_DISOP;
			if (pdata)
				reg |= pdata->dres << 4;
			wm8960_write(codec, WM8960_APOP2, reg);

			/* allow the outputs to fully discharge */
			msleep(400);

			wm8960_write(codec, WM8960_APOP2, 0);

			/* Enable & ramp VMID at 2x50k */
			reg = wm8960_read(codec, WM8960_POWER1);
			reg |= 0x80;
			wm8960_write(codec, WM8960_POWER1, reg);
			msleep(100);

			/* Enable VREF */
			wm8960_write(codec, WM8960_POWER1, reg | WM8960_VREF);

			/* Disable anti-pop features */
			wm8960_write(codec, WM8960_APOP1, WM8960_BUFIOEN);
		}

		/* Set VMID to 2x250k (low-power standby) */
		reg = wm8960_read(codec, WM8960_POWER1);
		reg &= ~0x180;
		reg |= 0x100;
		wm8960_write(codec, WM8960_POWER1, reg);
		break;

	case SND_SOC_BIAS_OFF:
		/* Enable anti-pop features */
		wm8960_write(codec, WM8960_APOP1,
			     WM8960_POBCTRL | WM8960_SOFT_ST |
			     WM8960_BUFDCOPEN | WM8960_BUFIOEN);

		/* Disable VMID and VREF, let them discharge */
		wm8960_write(codec, WM8960_POWER1, 0);
		msleep(600);

		wm8960_write(codec, WM8960_APOP1, 0);
		break;
	}

	codec->bias_level = level;

	return 0;
}
538
/* PLL divisors: pre-divider flag, integer part N and 24-bit fraction K,
 * packed to match the widths of the WM8960 PLL register fields. */
struct _pll_div {
	u32 pre_div:1;	/* divide the source clock by 2 before the PLL */
	u32 n:4;	/* integer part of the multiply ratio (6..12) */
	u32 k:24;	/* fractional part, K/2^24 */
};

/* The size in bits of the pll divide multiplied by 10
 * to allow rounding later */
#define FIXED_PLL_SIZE ((1 << 24) * 10)
549
550static int pll_factors(unsigned int source, unsigned int target,
551 struct _pll_div *pll_div)
552{
553 unsigned long long Kpart;
554 unsigned int K, Ndiv, Nmod;
555
556 pr_debug("WM8960 PLL: setting %dHz->%dHz\n", source, target);
557
558 /* Scale up target to PLL operating frequency */
559 target *= 4;
560
561 Ndiv = target / source;
562 if (Ndiv < 6) {
563 source >>= 1;
564 pll_div->pre_div = 1;
565 Ndiv = target / source;
566 } else
567 pll_div->pre_div = 0;
568
569 if ((Ndiv < 6) || (Ndiv > 12)) {
570 pr_err("WM8960 PLL: Unsupported N=%d\n", Ndiv);
571 return -EINVAL;
572 }
573
574 pll_div->n = Ndiv;
575 Nmod = target % source;
576 Kpart = FIXED_PLL_SIZE * (long long)Nmod;
577
578 do_div(Kpart, source);
579
580 K = Kpart & 0xFFFFFFFF;
581
582 /* Check if we need to round */
583 if ((K % 10) >= 5)
584 K += 5;
585
586 /* Move down to proper range now rounding is done */
587 K /= 10;
588
589 pll_div->k = K;
590
591 pr_debug("WM8960 PLL: N=%x K=%x pre_div=%d\n",
592 pll_div->n, pll_div->k, pll_div->pre_div);
593
594 return 0;
595}
596
597static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai,
598 int pll_id, unsigned int freq_in, unsigned int freq_out)
599{
600 struct snd_soc_codec *codec = codec_dai->codec;
601 u16 reg;
602 static struct _pll_div pll_div;
603 int ret;
604
605 if (freq_in && freq_out) {
606 ret = pll_factors(freq_in, freq_out, &pll_div);
607 if (ret != 0)
608 return ret;
609 }
610
611 /* Disable the PLL: even if we are changing the frequency the
612 * PLL needs to be disabled while we do so. */
613 wm8960_write(codec, WM8960_CLOCK1,
614 wm8960_read(codec, WM8960_CLOCK1) & ~1);
615 wm8960_write(codec, WM8960_POWER2,
616 wm8960_read(codec, WM8960_POWER2) & ~1);
617
618 if (!freq_in || !freq_out)
619 return 0;
620
621 reg = wm8960_read(codec, WM8960_PLL1) & ~0x3f;
622 reg |= pll_div.pre_div << 4;
623 reg |= pll_div.n;
624
625 if (pll_div.k) {
626 reg |= 0x20;
627
628 wm8960_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
629 wm8960_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
630 wm8960_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
631 }
632 wm8960_write(codec, WM8960_PLL1, reg);
633
634 /* Turn it on */
635 wm8960_write(codec, WM8960_POWER2,
636 wm8960_read(codec, WM8960_POWER2) | 1);
637 msleep(250);
638 wm8960_write(codec, WM8960_CLOCK1,
639 wm8960_read(codec, WM8960_CLOCK1) | 1);
640
641 return 0;
642}
643
644static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
645 int div_id, int div)
646{
647 struct snd_soc_codec *codec = codec_dai->codec;
648 u16 reg;
649
650 switch (div_id) {
651 case WM8960_SYSCLKSEL:
652 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1fe;
653 wm8960_write(codec, WM8960_CLOCK1, reg | div);
654 break;
655 case WM8960_SYSCLKDIV:
656 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1f9;
657 wm8960_write(codec, WM8960_CLOCK1, reg | div);
658 break;
659 case WM8960_DACDIV:
660 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1c7;
661 wm8960_write(codec, WM8960_CLOCK1, reg | div);
662 break;
663 case WM8960_OPCLKDIV:
664 reg = wm8960_read(codec, WM8960_PLL1) & 0x03f;
665 wm8960_write(codec, WM8960_PLL1, reg | div);
666 break;
667 case WM8960_DCLKDIV:
668 reg = wm8960_read(codec, WM8960_CLOCK2) & 0x03f;
669 wm8960_write(codec, WM8960_CLOCK2, reg | div);
670 break;
671 case WM8960_TOCLKSEL:
672 reg = wm8960_read(codec, WM8960_ADDCTL1) & 0x1fd;
673 wm8960_write(codec, WM8960_ADDCTL1, reg | div);
674 break;
675 default:
676 return -EINVAL;
677 }
678
679 return 0;
680}
681
/* Sample rates and formats advertised to ALSA for both directions */
#define WM8960_RATES SNDRV_PCM_RATE_8000_48000

#define WM8960_FORMATS \
	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
	SNDRV_PCM_FMTBIT_S24_LE)

/* DAI callbacks implemented above */
static struct snd_soc_dai_ops wm8960_dai_ops = {
	.hw_params = wm8960_hw_params,
	.digital_mute = wm8960_mute,
	.set_fmt = wm8960_set_dai_fmt,
	.set_clkdiv = wm8960_set_dai_clkdiv,
	.set_pll = wm8960_set_dai_pll,
};
695
/* The WM8960's single DAI: stereo playback and capture, rates/formats as
 * above.  symmetric_rates: playback and capture must share one rate. */
struct snd_soc_dai wm8960_dai = {
	.name = "WM8960",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8960_RATES,
		.formats = WM8960_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8960_RATES,
		.formats = WM8960_FORMATS,},
	.ops = &wm8960_dai_ops,
	.symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(wm8960_dai);
714
715static int wm8960_suspend(struct platform_device *pdev, pm_message_t state)
716{
717 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
718 struct snd_soc_codec *codec = socdev->card->codec;
719
720 wm8960_set_bias_level(codec, SND_SOC_BIAS_OFF);
721 return 0;
722}
723
/*
 * Restore every register from the cache after resume, then bring the part
 * back up to the bias level it held when it was suspended.
 */
static int wm8960_resume(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec = socdev->card->codec;
	int i;
	u8 data[2];
	u16 *cache = codec->reg_cache;

	/* Sync reg_cache with the hardware */
	for (i = 0; i < ARRAY_SIZE(wm8960_reg); i++) {
		/* 7-bit register address + 9-bit value, value MSB in byte 0 */
		data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
		data[1] = cache[i] & 0x00ff;
		codec->hw_write(codec->control_data, data, 2);
	}

	/* Step through STANDBY first so the OFF->STANDBY anti-pop
	 * sequence in wm8960_set_bias_level() runs. */
	wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
	wm8960_set_bias_level(codec, codec->suspend_bias_level);
	return 0;
}
743
/* Singleton codec instance created by wm8960_register() at I2C probe time */
static struct snd_soc_codec *wm8960_codec;

/*
 * ASoC platform-device probe: attach the already-registered codec to the
 * sound card, create the PCMs, register controls/widgets and init the card.
 */
static int wm8960_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec;
	int ret = 0;

	/* The codec is registered from the I2C probe; without it there is
	 * nothing to bind to. */
	if (wm8960_codec == NULL) {
		dev_err(&pdev->dev, "Codec device not registered\n");
		return -ENODEV;
	}

	socdev->card->codec = wm8960_codec;
	codec = wm8960_codec;

	/* register pcms */
	ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
	if (ret < 0) {
		dev_err(codec->dev, "failed to create pcms: %d\n", ret);
		goto pcm_err;
	}

	snd_soc_add_controls(codec, wm8960_snd_controls,
			     ARRAY_SIZE(wm8960_snd_controls));
	wm8960_add_widgets(codec);
	ret = snd_soc_init_card(socdev);
	if (ret < 0) {
		dev_err(codec->dev, "failed to register card: %d\n", ret);
		goto card_err;
	}

	return ret;

card_err:
	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
pcm_err:
	return ret;
}
784
/* ASoC platform-device remove: tear down the PCMs and DAPM state. */
static int wm8960_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);

	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);

	return 0;
}
795
/* Codec device operations exported for machine drivers */
struct snd_soc_codec_device soc_codec_dev_wm8960 = {
	.probe = wm8960_probe,
	.remove = wm8960_remove,
	.suspend = wm8960_suspend,
	.resume = wm8960_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8960);
803
804static int wm8960_register(struct wm8960_priv *wm8960)
805{
806 struct wm8960_data *pdata = wm8960->codec.dev->platform_data;
807 struct snd_soc_codec *codec = &wm8960->codec;
808 int ret;
809 u16 reg;
810
811 if (wm8960_codec) {
812 dev_err(codec->dev, "Another WM8960 is registered\n");
813 return -EINVAL;
814 }
815
816 if (!pdata) {
817 dev_warn(codec->dev, "No platform data supplied\n");
818 } else {
819 if (pdata->dres > WM8960_DRES_MAX) {
820 dev_err(codec->dev, "Invalid DRES: %d\n", pdata->dres);
821 pdata->dres = 0;
822 }
823 }
824
825 mutex_init(&codec->mutex);
826 INIT_LIST_HEAD(&codec->dapm_widgets);
827 INIT_LIST_HEAD(&codec->dapm_paths);
828
829 codec->private_data = wm8960;
830 codec->name = "WM8960";
831 codec->owner = THIS_MODULE;
832 codec->read = wm8960_read_reg_cache;
833 codec->write = wm8960_write;
834 codec->bias_level = SND_SOC_BIAS_OFF;
835 codec->set_bias_level = wm8960_set_bias_level;
836 codec->dai = &wm8960_dai;
837 codec->num_dai = 1;
838 codec->reg_cache_size = WM8960_CACHEREGNUM;
839 codec->reg_cache = &wm8960->reg_cache;
840
841 memcpy(codec->reg_cache, wm8960_reg, sizeof(wm8960_reg));
842
843 ret = wm8960_reset(codec);
844 if (ret < 0) {
845 dev_err(codec->dev, "Failed to issue reset\n");
846 return ret;
847 }
848
849 wm8960_dai.dev = codec->dev;
850
851 wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
852
853 /* Latch the update bits */
854 reg = wm8960_read(codec, WM8960_LINVOL);
855 wm8960_write(codec, WM8960_LINVOL, reg | 0x100);
856 reg = wm8960_read(codec, WM8960_RINVOL);
857 wm8960_write(codec, WM8960_RINVOL, reg | 0x100);
858 reg = wm8960_read(codec, WM8960_LADC);
859 wm8960_write(codec, WM8960_LADC, reg | 0x100);
860 reg = wm8960_read(codec, WM8960_RADC);
861 wm8960_write(codec, WM8960_RADC, reg | 0x100);
862 reg = wm8960_read(codec, WM8960_LDAC);
863 wm8960_write(codec, WM8960_LDAC, reg | 0x100);
864 reg = wm8960_read(codec, WM8960_RDAC);
865 wm8960_write(codec, WM8960_RDAC, reg | 0x100);
866 reg = wm8960_read(codec, WM8960_LOUT1);
867 wm8960_write(codec, WM8960_LOUT1, reg | 0x100);
868 reg = wm8960_read(codec, WM8960_ROUT1);
869 wm8960_write(codec, WM8960_ROUT1, reg | 0x100);
870 reg = wm8960_read(codec, WM8960_LOUT2);
871 wm8960_write(codec, WM8960_LOUT2, reg | 0x100);
872 reg = wm8960_read(codec, WM8960_ROUT2);
873 wm8960_write(codec, WM8960_ROUT2, reg | 0x100);
874
875 wm8960_codec = codec;
876
877 ret = snd_soc_register_codec(codec);
878 if (ret != 0) {
879 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
880 return ret;
881 }
882
883 ret = snd_soc_register_dai(&wm8960_dai);
884 if (ret != 0) {
885 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
886 snd_soc_unregister_codec(codec);
887 return ret;
888 }
889
890 return 0;
891}
892
/* Undo wm8960_register(): power off, unregister DAI/codec, free the
 * private data and clear the singleton so a new instance may bind. */
static void wm8960_unregister(struct wm8960_priv *wm8960)
{
	wm8960_set_bias_level(&wm8960->codec, SND_SOC_BIAS_OFF);
	snd_soc_unregister_dai(&wm8960_dai);
	snd_soc_unregister_codec(&wm8960->codec);
	kfree(wm8960);
	wm8960_codec = NULL;
}
901
/*
 * I2C probe: allocate the driver state, wire the codec's register I/O to
 * the I2C client, and hand off to wm8960_register() for the real setup.
 * wm8960_register() frees nothing on failure; the allocation is owned by
 * i2c clientdata and released in wm8960_i2c_remove()/wm8960_unregister().
 */
static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	struct wm8960_priv *wm8960;
	struct snd_soc_codec *codec;

	wm8960 = kzalloc(sizeof(struct wm8960_priv), GFP_KERNEL);
	if (wm8960 == NULL)
		return -ENOMEM;

	codec = &wm8960->codec;
	/* all register writes go out as raw 2-byte I2C transfers */
	codec->hw_write = (hw_write_t)i2c_master_send;

	i2c_set_clientdata(i2c, wm8960);
	codec->control_data = i2c;

	codec->dev = &i2c->dev;

	return wm8960_register(wm8960);
}
922
/* I2C remove: tear down and free everything allocated in probe. */
static __devexit int wm8960_i2c_remove(struct i2c_client *client)
{
	struct wm8960_priv *wm8960 = i2c_get_clientdata(client);
	wm8960_unregister(wm8960);
	return 0;
}
929
/* Devices this driver binds to on the I2C bus */
static const struct i2c_device_id wm8960_i2c_id[] = {
	{ "wm8960", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8960_i2c_id);

static struct i2c_driver wm8960_i2c_driver = {
	.driver = {
		.name = "WM8960 I2C Codec",
		.owner = THIS_MODULE,
	},
	.probe = wm8960_i2c_probe,
	.remove = __devexit_p(wm8960_i2c_remove),
	.id_table = wm8960_i2c_id,
};
945
/* Module init: register the I2C driver; failure is logged and propagated. */
static int __init wm8960_modinit(void)
{
	int ret;

	ret = i2c_add_driver(&wm8960_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8960 I2C driver: %d\n",
		       ret);
	}

	return ret;
}
module_init(wm8960_modinit);
959
/* Module exit: unregister the I2C driver (per-device teardown happens in
 * the driver's remove callback). */
static void __exit wm8960_exit(void)
{
	i2c_del_driver(&wm8960_i2c_driver);
}
module_exit(wm8960_exit);
965
966
967MODULE_DESCRIPTION("ASoC WM8960 driver");
968MODULE_AUTHOR("Liam Girdwood");
969MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8960.h b/sound/soc/codecs/wm8960.h
new file mode 100644
index 000000000000..c9af56c9d9d4
--- /dev/null
+++ b/sound/soc/codecs/wm8960.h
@@ -0,0 +1,127 @@
1/*
2 * wm8960.h -- WM8960 Soc Audio driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
#ifndef _WM8960_H
#define _WM8960_H

/* WM8960 register space */

/* Number of cached registers; the register map is sparse below (gaps are
 * unimplemented addresses), but the cache covers the full 0..0x37 range. */

#define WM8960_CACHEREGNUM 	56

#define WM8960_LINVOL		0x0
#define WM8960_RINVOL		0x1
#define WM8960_LOUT1		0x2
#define WM8960_ROUT1		0x3
#define WM8960_CLOCK1		0x4
#define WM8960_DACCTL1		0x5
#define WM8960_DACCTL2		0x6
#define WM8960_IFACE1		0x7
#define WM8960_CLOCK2		0x8
#define WM8960_IFACE2		0x9
#define WM8960_LDAC		0xa
#define WM8960_RDAC		0xb

#define WM8960_RESET		0xf
#define WM8960_3D		0x10
#define WM8960_ALC1		0x11
#define WM8960_ALC2		0x12
#define WM8960_ALC3		0x13
#define WM8960_NOISEG		0x14
#define WM8960_LADC		0x15
#define WM8960_RADC		0x16
#define WM8960_ADDCTL1		0x17
#define WM8960_ADDCTL2		0x18
#define WM8960_POWER1		0x19
#define WM8960_POWER2		0x1a
#define WM8960_ADDCTL3		0x1b
#define WM8960_APOP1		0x1c
#define WM8960_APOP2		0x1d

#define WM8960_LINPATH		0x20
#define WM8960_RINPATH		0x21
#define WM8960_LOUTMIX		0x22

#define WM8960_ROUTMIX		0x25
#define WM8960_MONOMIX1		0x26
#define WM8960_MONOMIX2		0x27
#define WM8960_LOUT2		0x28
#define WM8960_ROUT2		0x29
#define WM8960_MONO		0x2a
#define WM8960_INBMIX1		0x2b
#define WM8960_INBMIX2		0x2c
#define WM8960_BYPASS1		0x2d
#define WM8960_BYPASS2		0x2e
#define WM8960_POWER3		0x2f
#define WM8960_ADDCTL4		0x30
#define WM8960_CLASSD1		0x31

#define WM8960_CLASSD3		0x33
#define WM8960_PLL1		0x34
#define WM8960_PLL2		0x35
#define WM8960_PLL3		0x36
#define WM8960_PLL4		0x37


/*
 * WM8960 Clock dividers
 * (div_id values for snd_soc_dai_set_clkdiv(); the *_DIV_*/SEL values
 * below are pre-shifted into their register field positions)
 */
#define WM8960_SYSCLKDIV 		0
#define WM8960_DACDIV			1
#define WM8960_OPCLKDIV			2
#define WM8960_DCLKDIV			3
#define WM8960_TOCLKSEL			4
#define WM8960_SYSCLKSEL		5

#define WM8960_SYSCLK_DIV_1		(0 << 1)
#define WM8960_SYSCLK_DIV_2		(2 << 1)

#define WM8960_SYSCLK_MCLK		(0 << 0)
#define WM8960_SYSCLK_PLL		(1 << 0)

#define WM8960_DAC_DIV_1		(0 << 3)
#define WM8960_DAC_DIV_1_5		(1 << 3)
#define WM8960_DAC_DIV_2		(2 << 3)
#define WM8960_DAC_DIV_3		(3 << 3)
#define WM8960_DAC_DIV_4		(4 << 3)
#define WM8960_DAC_DIV_5_5		(5 << 3)
#define WM8960_DAC_DIV_6		(6 << 3)

#define WM8960_DCLK_DIV_1_5		(0 << 6)
#define WM8960_DCLK_DIV_2		(1 << 6)
#define WM8960_DCLK_DIV_3		(2 << 6)
#define WM8960_DCLK_DIV_4		(3 << 6)
#define WM8960_DCLK_DIV_6		(4 << 6)
#define WM8960_DCLK_DIV_8		(5 << 6)
#define WM8960_DCLK_DIV_12		(6 << 6)
#define WM8960_DCLK_DIV_16		(7 << 6)

#define WM8960_TOCLK_F19		(0 << 1)
#define WM8960_TOCLK_F21		(1 << 1)

#define WM8960_OPCLK_DIV_1		(0 << 0)
#define WM8960_OPCLK_DIV_2		(1 << 0)
#define WM8960_OPCLK_DIV_3		(2 << 0)
#define WM8960_OPCLK_DIV_4		(3 << 0)
#define WM8960_OPCLK_DIV_5_5		(4 << 0)
#define WM8960_OPCLK_DIV_6		(5 << 0)

extern struct snd_soc_dai wm8960_dai;
extern struct snd_soc_codec_device soc_codec_dev_wm8960;

/* Headphone discharge resistance selection for platform data (dres) */
#define WM8960_DRES_400R 0
#define WM8960_DRES_200R 1
#define WM8960_DRES_600R 2
#define WM8960_DRES_150R 3
#define WM8960_DRES_MAX  3

struct wm8960_data {
	int dres;	/* one of the WM8960_DRES_* values above */
};

#endif
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
new file mode 100644
index 000000000000..c05f71803aa8
--- /dev/null
+++ b/sound/soc/codecs/wm8988.c
@@ -0,0 +1,1097 @@
1/*
2 * wm8988.c -- WM8988 ALSA SoC audio driver
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 * Copyright 2005 Openedhand Ltd.
6 *
7 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/i2c.h>
20#include <linux/spi/spi.h>
21#include <linux/platform_device.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/tlv.h>
26#include <sound/soc.h>
27#include <sound/soc-dapm.h>
28#include <sound/initval.h>
29
30#include "wm8988.h"
31
32/*
33 * wm8988 register cache
34 * We can't read the WM8988 register space when we
35 * are using 2 wire for device control, so we cache them instead.
36 */
/* Initial cache contents, one entry per register 0..42.
 * NOTE(review): presumably these are the chip's power-on reset defaults —
 * confirm against the WM8988 datasheet register map. */
static const u16 wm8988_reg[] = {
	0x0097, 0x0097, 0x0079, 0x0079,  /*  0 */
	0x0000, 0x0008, 0x0000, 0x000a,  /*  4 */
	0x0000, 0x0000, 0x00ff, 0x00ff,  /*  8 */
	0x000f, 0x000f, 0x0000, 0x0000,  /* 12 */
	0x0000, 0x007b, 0x0000, 0x0032,  /* 16 */
	0x0000, 0x00c3, 0x00c3, 0x00c0,  /* 20 */
	0x0000, 0x0000, 0x0000, 0x0000,  /* 24 */
	0x0000, 0x0000, 0x0000, 0x0000,  /* 28 */
	0x0000, 0x0000, 0x0050, 0x0050,  /* 32 */
	0x0050, 0x0050, 0x0050, 0x0050,  /* 36 */
	0x0079, 0x0079, 0x0079,          /* 40 */
};
50
/* codec private data */
struct wm8988_priv {
	unsigned int sysclk;				/* current SYSCLK rate in Hz */
	struct snd_soc_codec codec;			/* embedded codec instance */
	struct snd_pcm_hw_constraint_list *sysclk_constraints; /* rates valid for sysclk */
	u16 reg_cache[WM8988_NUM_REG];			/* shadow of the write-only registers */
};
58
59
60/*
61 * read wm8988 register cache
62 */
63static inline unsigned int wm8988_read_reg_cache(struct snd_soc_codec *codec,
64 unsigned int reg)
65{
66 u16 *cache = codec->reg_cache;
67 if (reg > WM8988_NUM_REG)
68 return -1;
69 return cache[reg];
70}
71
72/*
73 * write wm8988 register cache
74 */
75static inline void wm8988_write_reg_cache(struct snd_soc_codec *codec,
76 unsigned int reg, unsigned int value)
77{
78 u16 *cache = codec->reg_cache;
79 if (reg > WM8988_NUM_REG)
80 return;
81 cache[reg] = value;
82}
83
/*
 * Write a register: update the cache first, then push the value to the
 * chip as a 2-byte control transfer.  Returns 0 on success, -EIO when the
 * bus write did not transfer exactly 2 bytes.
 */
static int wm8988_write(struct snd_soc_codec *codec, unsigned int reg,
			unsigned int value)
{
	u8 data[2];

	/* data is
	 * D15..D9 WM8988 register offset
	 * D8...D0 register data
	 */
	data[0] = (reg << 1) | ((value >> 8) & 0x0001);
	data[1] = value & 0x00ff;

	wm8988_write_reg_cache(codec, reg, value);
	if (codec->hw_write(codec->control_data, data, 2) == 2)
		return 0;
	else
		return -EIO;
}

/* Writing anything to the RESET register resets the chip */
#define wm8988_reset(c)	wm8988_write(c, WM8988_RESET, 0)
104
105/*
106 * WM8988 Controls
107 */
108
/* Enum texts and TLV scales for the mixer controls below */

static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"};
static const struct soc_enum bass_boost =
	SOC_ENUM_SINGLE(WM8988_BASS, 7, 2, bass_boost_txt);

static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" };
static const struct soc_enum bass_filter =
	SOC_ENUM_SINGLE(WM8988_BASS, 6, 2, bass_filter_txt);

static const char *treble_txt[] = {"8kHz", "4kHz"};
static const struct soc_enum treble =
	SOC_ENUM_SINGLE(WM8988_TREBLE, 6, 2, treble_txt);

static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"};
static const struct soc_enum stereo_3d_lc =
	SOC_ENUM_SINGLE(WM8988_3D, 5, 2, stereo_3d_lc_txt);

static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"};
static const struct soc_enum stereo_3d_uc =
	SOC_ENUM_SINGLE(WM8988_3D, 6, 2, stereo_3d_uc_txt);

static const char *stereo_3d_func_txt[] = {"Capture", "Playback"};
static const struct soc_enum stereo_3d_func =
	SOC_ENUM_SINGLE(WM8988_3D, 7, 2, stereo_3d_func_txt);

static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"};
static const struct soc_enum alc_func =
	SOC_ENUM_SINGLE(WM8988_ALC1, 7, 4, alc_func_txt);

static const char *ng_type_txt[] = {"Constant PGA Gain",
				    "Mute ADC Output"};
static const struct soc_enum ng_type =
	SOC_ENUM_SINGLE(WM8988_NGATE, 1, 2, ng_type_txt);

static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"};
static const struct soc_enum deemph =
	SOC_ENUM_SINGLE(WM8988_ADCDAC, 1, 4, deemph_txt);

static const char *adcpol_txt[] = {"Normal", "L Invert", "R Invert",
				   "L + R Invert"};
static const struct soc_enum adcpol =
	SOC_ENUM_SINGLE(WM8988_ADCDAC, 5, 4, adcpol_txt);

/* dB ranges: (min dB*100, step dB*100, mute-at-minimum flag) */
static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
156
/* ALSA mixer controls exposed by the WM8988 */
static const struct snd_kcontrol_new wm8988_snd_controls[] = {

SOC_ENUM("Bass Boost", bass_boost),
SOC_ENUM("Bass Filter", bass_filter),
SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1),

SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0),
SOC_ENUM("Treble Cut-off", treble),

SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0),
SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0),
SOC_ENUM("3D Lower Cut-off", stereo_3d_lc),
SOC_ENUM("3D Upper Cut-off", stereo_3d_uc),
SOC_ENUM("3D Mode", stereo_3d_func),

SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0),
SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0),
SOC_ENUM("ALC Capture Function", alc_func),
SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0),
SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0),
SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0),
SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0),
SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0),
SOC_ENUM("ALC Capture NG Type", ng_type),
SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0),

SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0),

/* Capture path: digital and PGA gains, zero-cross and mute */
SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC,
		 0, 255, 0, adc_tlv),
SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL,
		 0, 63, 0, pga_tlv),
SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0),
SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1),

SOC_ENUM("Playback De-emphasis", deemph),

SOC_ENUM("Capture Polarity", adcpol),
SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0),
SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0),

SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC, 0, 255, 0, dac_tlv),

/* Bypass path gains into the output mixers */
SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", WM8988_LOUTM1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2, 4, 7, 1,
	       bypass_tlv),

SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V,
	     WM8988_ROUT1V, 7, 1, 0),
SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V, WM8988_ROUT1V,
		 0, 127, 0, out_tlv),

SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V,
	     WM8988_ROUT2V, 7, 1, 0),
SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V,
		 0, 127, 0, out_tlv),

};
220
221/*
222 * DAPM Controls
223 */
224
/*
 * DAPM event handler that picks which converter gates LRCLK: when either
 * DAC is powered (PWR2 bits 0x180) the DAC gates LRC, otherwise the ADC
 * does (ADCTL2 bit 2).  The @event argument is intentionally unused; the
 * decision is re-derived from the cached power state on every event.
 * Returns the result of the register write (0 or -EIO).
 */
static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	u16 adctl2 = wm8988_read_reg_cache(codec, WM8988_ADCTL2);

	/* Use the DAC to gate LRC if active, otherwise use ADC */
	if (wm8988_read_reg_cache(codec, WM8988_PWR2) & 0x180)
		adctl2 &= ~0x4;
	else
		adctl2 |= 0x4;

	return wm8988_write(codec, WM8988_ADCTL2, adctl2);
}
239
/* Line output mux options; values map to the LOUTM1/ROUTM1 select fields
 * (value 2 is skipped, hence the value table) */
static const char *wm8988_line_texts[] = {
	"Line 1", "Line 2", "PGA", "Differential"};

static const unsigned int wm8988_line_values[] = {
	0, 1, 3, 4};

/* Left line output source mux (LOUTM1) */
static const struct soc_enum wm8988_lline_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7,
			      ARRAY_SIZE(wm8988_line_texts),
			      wm8988_line_texts,
			      wm8988_line_values);
static const struct snd_kcontrol_new wm8988_left_line_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);
253
254static const struct soc_enum wm8988_rline_enum =
255 SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7,
256 ARRAY_SIZE(wm8988_line_texts),
257 wm8988_line_texts,
258 wm8988_line_values);
259static const struct snd_kcontrol_new wm8988_right_line_controls =
260 SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum);
261
/* Left Mixer: left/right DAC and bypass inputs into the left output mixer */
static const struct snd_kcontrol_new wm8988_left_mixer_controls[] = {
	SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Right Playback Switch", WM8988_LOUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0),
};

/* Right Mixer: left/right DAC and bypass inputs into the right output mixer */
static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0),
};
277
/* ADC input PGA source selection; register value 2 is unused, hence the
 * value-enum form. */
static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"};
static const unsigned int wm8988_pga_val[] = { 0, 1, 3 };

/* Left PGA Mux */
static const struct soc_enum wm8988_lpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_left_pga_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_lpga_enum);

/* Right PGA Mux */
static const struct soc_enum wm8988_rpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_right_pga_controls =
	SOC_DAPM_VALUE_ENUM("Route", wm8988_rpga_enum);

/* Differential Mux: choose which line input pair feeds the
 * differential path (ADCIN bit 8). */
static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"};
static const struct soc_enum diffmux =
	SOC_ENUM_SINGLE(WM8988_ADCIN, 8, 2, wm8988_diff_sel);
static const struct snd_kcontrol_new wm8988_diffmux_controls =
	SOC_DAPM_ENUM("Route", diffmux);

/* Mono ADC Mux: stereo/mono capture mode selection (ADCIN bits 6-7) */
static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)",
	"Mono (Right)", "Digital Mono"};
static const struct soc_enum monomux =
	SOC_ENUM_SINGLE(WM8988_ADCIN, 6, 4, wm8988_mono_mux);
static const struct snd_kcontrol_new wm8988_monomux_controls =
	SOC_DAPM_ENUM("Route", monomux);
313
/* DAPM widget graph.  Power bits live in PWR1 (input side) and PWR2
 * (DAC/output side); pure-routing muxes and the mixers use
 * SND_SOC_NOPM since they have no power control of their own. */
static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = {
	SND_SOC_DAPM_MICBIAS("Mic Bias", WM8988_PWR1, 1, 0),

	SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_diffmux_controls),
	SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),
	SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),

	SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0,
		&wm8988_left_pga_controls),
	SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0,
		&wm8988_right_pga_controls),

	SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_left_line_controls),
	SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_right_line_controls),

	SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0),
	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0),

	SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0),
	SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0),

	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_left_mixer_controls[0],
		ARRAY_SIZE(wm8988_left_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_right_mixer_controls[0],
		ARRAY_SIZE(wm8988_right_mixer_controls)),

	SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0),

	/* Runs after each DAPM sequence to repoint LRC gating, see
	 * wm8988_lrc_control() */
	SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control),

	SND_SOC_DAPM_OUTPUT("LOUT1"),
	SND_SOC_DAPM_OUTPUT("ROUT1"),
	SND_SOC_DAPM_OUTPUT("LOUT2"),
	SND_SOC_DAPM_OUTPUT("ROUT2"),
	SND_SOC_DAPM_OUTPUT("VREF"),

	SND_SOC_DAPM_INPUT("LINPUT1"),
	SND_SOC_DAPM_INPUT("LINPUT2"),
	SND_SOC_DAPM_INPUT("RINPUT1"),
	SND_SOC_DAPM_INPUT("RINPUT2"),
};
365
366static const struct snd_soc_dapm_route audio_map[] = {
367
368 { "Left Line Mux", "Line 1", "LINPUT1" },
369 { "Left Line Mux", "Line 2", "LINPUT2" },
370 { "Left Line Mux", "PGA", "Left PGA Mux" },
371 { "Left Line Mux", "Differential", "Differential Mux" },
372
373 { "Right Line Mux", "Line 1", "RINPUT1" },
374 { "Right Line Mux", "Line 2", "RINPUT2" },
375 { "Right Line Mux", "PGA", "Right PGA Mux" },
376 { "Right Line Mux", "Differential", "Differential Mux" },
377
378 { "Left PGA Mux", "Line 1", "LINPUT1" },
379 { "Left PGA Mux", "Line 2", "LINPUT2" },
380 { "Left PGA Mux", "Differential", "Differential Mux" },
381
382 { "Right PGA Mux", "Line 1", "RINPUT1" },
383 { "Right PGA Mux", "Line 2", "RINPUT2" },
384 { "Right PGA Mux", "Differential", "Differential Mux" },
385
386 { "Differential Mux", "Line 1", "LINPUT1" },
387 { "Differential Mux", "Line 1", "RINPUT1" },
388 { "Differential Mux", "Line 2", "LINPUT2" },
389 { "Differential Mux", "Line 2", "RINPUT2" },
390
391 { "Left ADC Mux", "Stereo", "Left PGA Mux" },
392 { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" },
393 { "Left ADC Mux", "Digital Mono", "Left PGA Mux" },
394
395 { "Right ADC Mux", "Stereo", "Right PGA Mux" },
396 { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" },
397 { "Right ADC Mux", "Digital Mono", "Right PGA Mux" },
398
399 { "Left ADC", NULL, "Left ADC Mux" },
400 { "Right ADC", NULL, "Right ADC Mux" },
401
402 { "Left Line Mux", "Line 1", "LINPUT1" },
403 { "Left Line Mux", "Line 2", "LINPUT2" },
404 { "Left Line Mux", "PGA", "Left PGA Mux" },
405 { "Left Line Mux", "Differential", "Differential Mux" },
406
407 { "Right Line Mux", "Line 1", "RINPUT1" },
408 { "Right Line Mux", "Line 2", "RINPUT2" },
409 { "Right Line Mux", "PGA", "Right PGA Mux" },
410 { "Right Line Mux", "Differential", "Differential Mux" },
411
412 { "Left Mixer", "Playback Switch", "Left DAC" },
413 { "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
414 { "Left Mixer", "Right Playback Switch", "Right DAC" },
415 { "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
416
417 { "Right Mixer", "Left Playback Switch", "Left DAC" },
418 { "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
419 { "Right Mixer", "Playback Switch", "Right DAC" },
420 { "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
421
422 { "Left Out 1", NULL, "Left Mixer" },
423 { "LOUT1", NULL, "Left Out 1" },
424 { "Right Out 1", NULL, "Right Mixer" },
425 { "ROUT1", NULL, "Right Out 1" },
426
427 { "Left Out 2", NULL, "Left Mixer" },
428 { "LOUT2", NULL, "Left Out 2" },
429 { "Right Out 2", NULL, "Right Mixer" },
430 { "ROUT2", NULL, "Right Out 2" },
431};
432
/* One MCLK/sample-rate combination: fs is the MCLK/rate ratio, sr is
 * the 5-bit SRATE register code and usb selects USB (12 MHz) mode. */
struct _coeff_div {
	u32 mclk;
	u32 rate;
	u16 fs;
	u8 sr:5;
	u8 usb:1;
};

/* codec hifi mclk clock divider coefficients */
static const struct _coeff_div coeff_div[] = {
	/* 8k */
	{12288000, 8000, 1536, 0x6, 0x0},
	{11289600, 8000, 1408, 0x16, 0x0},
	{18432000, 8000, 2304, 0x7, 0x0},
	{16934400, 8000, 2112, 0x17, 0x0},
	{12000000, 8000, 1500, 0x6, 0x1},

	/* 11.025k */
	{11289600, 11025, 1024, 0x18, 0x0},
	{16934400, 11025, 1536, 0x19, 0x0},
	{12000000, 11025, 1088, 0x19, 0x1},

	/* 16k */
	{12288000, 16000, 768, 0xa, 0x0},
	{18432000, 16000, 1152, 0xb, 0x0},
	{12000000, 16000, 750, 0xa, 0x1},

	/* 22.05k */
	{11289600, 22050, 512, 0x1a, 0x0},
	{16934400, 22050, 768, 0x1b, 0x0},
	{12000000, 22050, 544, 0x1b, 0x1},

	/* 32k */
	{12288000, 32000, 384, 0xc, 0x0},
	{18432000, 32000, 576, 0xd, 0x0},
	{12000000, 32000, 375, 0xa, 0x1},

	/* 44.1k */
	{11289600, 44100, 256, 0x10, 0x0},
	{16934400, 44100, 384, 0x11, 0x0},
	{12000000, 44100, 272, 0x11, 0x1},

	/* 48k */
	{12288000, 48000, 256, 0x0, 0x0},
	{18432000, 48000, 384, 0x1, 0x0},
	{12000000, 48000, 250, 0x0, 0x1},

	/* 88.2k */
	{11289600, 88200, 128, 0x1e, 0x0},
	{16934400, 88200, 192, 0x1f, 0x0},
	{12000000, 88200, 136, 0x1f, 0x1},

	/* 96k */
	{12288000, 96000, 128, 0xe, 0x0},
	{18432000, 96000, 192, 0xf, 0x0},
	{12000000, 96000, 125, 0xe, 0x1},
};
490
491static inline int get_coeff(int mclk, int rate)
492{
493 int i;
494
495 for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
496 if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk)
497 return i;
498 }
499
500 return -EINVAL;
501}
502
503/* The set of rates we can generate from the above for each SYSCLK */
504
505static unsigned int rates_12288[] = {
506 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000,
507};
508
509static struct snd_pcm_hw_constraint_list constraints_12288 = {
510 .count = ARRAY_SIZE(rates_12288),
511 .list = rates_12288,
512};
513
514static unsigned int rates_112896[] = {
515 8000, 11025, 22050, 44100,
516};
517
518static struct snd_pcm_hw_constraint_list constraints_112896 = {
519 .count = ARRAY_SIZE(rates_112896),
520 .list = rates_112896,
521};
522
523static unsigned int rates_12[] = {
524 8000, 11025, 12000, 16000, 22050, 2400, 32000, 41100, 48000,
525 48000, 88235, 96000,
526};
527
528static struct snd_pcm_hw_constraint_list constraints_12 = {
529 .count = ARRAY_SIZE(rates_12),
530 .list = rates_12,
531};
532
533/*
534 * Note that this should be called from init rather than from hw_params.
535 */
536static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai,
537 int clk_id, unsigned int freq, int dir)
538{
539 struct snd_soc_codec *codec = codec_dai->codec;
540 struct wm8988_priv *wm8988 = codec->private_data;
541
542 switch (freq) {
543 case 11289600:
544 case 18432000:
545 case 22579200:
546 case 36864000:
547 wm8988->sysclk_constraints = &constraints_112896;
548 wm8988->sysclk = freq;
549 return 0;
550
551 case 12288000:
552 case 16934400:
553 case 24576000:
554 case 33868800:
555 wm8988->sysclk_constraints = &constraints_12288;
556 wm8988->sysclk = freq;
557 return 0;
558
559 case 12000000:
560 case 24000000:
561 wm8988->sysclk_constraints = &constraints_12;
562 wm8988->sysclk = freq;
563 return 0;
564 }
565 return -EINVAL;
566}
567
568static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
569 unsigned int fmt)
570{
571 struct snd_soc_codec *codec = codec_dai->codec;
572 u16 iface = 0;
573
574 /* set master/slave audio interface */
575 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
576 case SND_SOC_DAIFMT_CBM_CFM:
577 iface = 0x0040;
578 break;
579 case SND_SOC_DAIFMT_CBS_CFS:
580 break;
581 default:
582 return -EINVAL;
583 }
584
585 /* interface format */
586 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
587 case SND_SOC_DAIFMT_I2S:
588 iface |= 0x0002;
589 break;
590 case SND_SOC_DAIFMT_RIGHT_J:
591 break;
592 case SND_SOC_DAIFMT_LEFT_J:
593 iface |= 0x0001;
594 break;
595 case SND_SOC_DAIFMT_DSP_A:
596 iface |= 0x0003;
597 break;
598 case SND_SOC_DAIFMT_DSP_B:
599 iface |= 0x0013;
600 break;
601 default:
602 return -EINVAL;
603 }
604
605 /* clock inversion */
606 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
607 case SND_SOC_DAIFMT_NB_NF:
608 break;
609 case SND_SOC_DAIFMT_IB_IF:
610 iface |= 0x0090;
611 break;
612 case SND_SOC_DAIFMT_IB_NF:
613 iface |= 0x0080;
614 break;
615 case SND_SOC_DAIFMT_NB_IF:
616 iface |= 0x0010;
617 break;
618 default:
619 return -EINVAL;
620 }
621
622 wm8988_write(codec, WM8988_IFACE, iface);
623 return 0;
624}
625
626static int wm8988_pcm_startup(struct snd_pcm_substream *substream,
627 struct snd_soc_dai *dai)
628{
629 struct snd_soc_codec *codec = dai->codec;
630 struct wm8988_priv *wm8988 = codec->private_data;
631
632 /* The set of sample rates that can be supported depends on the
633 * MCLK supplied to the CODEC - enforce this.
634 */
635 if (!wm8988->sysclk) {
636 dev_err(codec->dev,
637 "No MCLK configured, call set_sysclk() on init\n");
638 return -EINVAL;
639 }
640
641 snd_pcm_hw_constraint_list(substream->runtime, 0,
642 SNDRV_PCM_HW_PARAM_RATE,
643 wm8988->sysclk_constraints);
644
645 return 0;
646}
647
648static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
649 struct snd_pcm_hw_params *params,
650 struct snd_soc_dai *dai)
651{
652 struct snd_soc_pcm_runtime *rtd = substream->private_data;
653 struct snd_soc_device *socdev = rtd->socdev;
654 struct snd_soc_codec *codec = socdev->card->codec;
655 struct wm8988_priv *wm8988 = codec->private_data;
656 u16 iface = wm8988_read_reg_cache(codec, WM8988_IFACE) & 0x1f3;
657 u16 srate = wm8988_read_reg_cache(codec, WM8988_SRATE) & 0x180;
658 int coeff;
659
660 coeff = get_coeff(wm8988->sysclk, params_rate(params));
661 if (coeff < 0) {
662 coeff = get_coeff(wm8988->sysclk / 2, params_rate(params));
663 srate |= 0x40;
664 }
665 if (coeff < 0) {
666 dev_err(codec->dev,
667 "Unable to configure sample rate %dHz with %dHz MCLK\n",
668 params_rate(params), wm8988->sysclk);
669 return coeff;
670 }
671
672 /* bit size */
673 switch (params_format(params)) {
674 case SNDRV_PCM_FORMAT_S16_LE:
675 break;
676 case SNDRV_PCM_FORMAT_S20_3LE:
677 iface |= 0x0004;
678 break;
679 case SNDRV_PCM_FORMAT_S24_LE:
680 iface |= 0x0008;
681 break;
682 case SNDRV_PCM_FORMAT_S32_LE:
683 iface |= 0x000c;
684 break;
685 }
686
687 /* set iface & srate */
688 wm8988_write(codec, WM8988_IFACE, iface);
689 if (coeff >= 0)
690 wm8988_write(codec, WM8988_SRATE, srate |
691 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
692
693 return 0;
694}
695
696static int wm8988_mute(struct snd_soc_dai *dai, int mute)
697{
698 struct snd_soc_codec *codec = dai->codec;
699 u16 mute_reg = wm8988_read_reg_cache(codec, WM8988_ADCDAC) & 0xfff7;
700
701 if (mute)
702 wm8988_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
703 else
704 wm8988_write(codec, WM8988_ADCDAC, mute_reg);
705 return 0;
706}
707
708static int wm8988_set_bias_level(struct snd_soc_codec *codec,
709 enum snd_soc_bias_level level)
710{
711 u16 pwr_reg = wm8988_read_reg_cache(codec, WM8988_PWR1) & ~0x1c1;
712
713 switch (level) {
714 case SND_SOC_BIAS_ON:
715 break;
716
717 case SND_SOC_BIAS_PREPARE:
718 /* VREF, VMID=2x50k, digital enabled */
719 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
720 break;
721
722 case SND_SOC_BIAS_STANDBY:
723 if (codec->bias_level == SND_SOC_BIAS_OFF) {
724 /* VREF, VMID=2x5k */
725 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
726
727 /* Charge caps */
728 msleep(100);
729 }
730
731 /* VREF, VMID=2*500k, digital stopped */
732 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
733 break;
734
735 case SND_SOC_BIAS_OFF:
736 wm8988_write(codec, WM8988_PWR1, 0x0000);
737 break;
738 }
739 codec->bias_level = level;
740 return 0;
741}
742
#define WM8988_RATES SNDRV_PCM_RATE_8000_96000

/* NOTE(review): hw_params also programs a word length for S32_LE
 * (0x000c) but S32 is not advertised here — confirm whether that is
 * intentional before adding it. */
#define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
	SNDRV_PCM_FMTBIT_S24_LE)

static struct snd_soc_dai_ops wm8988_ops = {
	.startup = wm8988_pcm_startup,
	.hw_params = wm8988_pcm_hw_params,
	.set_fmt = wm8988_set_dai_fmt,
	.set_sysclk = wm8988_set_dai_sysclk,
	.digital_mute = wm8988_mute,
};
755
/* The single hifi DAI exposed by the WM8988.  symmetric_rates: capture
 * and playback must run at the same rate (the part shares one LRC —
 * see wm8988_lrc_control()). */
struct snd_soc_dai wm8988_dai = {
	.name = "WM8988",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.ops = &wm8988_ops,
	.symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(wm8988_dai);
776
777static int wm8988_suspend(struct platform_device *pdev, pm_message_t state)
778{
779 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
780 struct snd_soc_codec *codec = socdev->card->codec;
781
782 wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF);
783 return 0;
784}
785
786static int wm8988_resume(struct platform_device *pdev)
787{
788 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
789 struct snd_soc_codec *codec = socdev->card->codec;
790 int i;
791 u8 data[2];
792 u16 *cache = codec->reg_cache;
793
794 /* Sync reg_cache with the hardware */
795 for (i = 0; i < WM8988_NUM_REG; i++) {
796 if (i == WM8988_RESET)
797 continue;
798 data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
799 data[1] = cache[i] & 0x00ff;
800 codec->hw_write(codec->control_data, data, 2);
801 }
802
803 wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
804
805 return 0;
806}
807
/* Single-instance codec pointer: set by wm8988_register() from the bus
 * probe and consumed by wm8988_probe(); only one WM8988 is supported. */
static struct snd_soc_codec *wm8988_codec;
809
810static int wm8988_probe(struct platform_device *pdev)
811{
812 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
813 struct snd_soc_codec *codec;
814 int ret = 0;
815
816 if (wm8988_codec == NULL) {
817 dev_err(&pdev->dev, "Codec device not registered\n");
818 return -ENODEV;
819 }
820
821 socdev->card->codec = wm8988_codec;
822 codec = wm8988_codec;
823
824 /* register pcms */
825 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
826 if (ret < 0) {
827 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
828 goto pcm_err;
829 }
830
831 snd_soc_add_controls(codec, wm8988_snd_controls,
832 ARRAY_SIZE(wm8988_snd_controls));
833 snd_soc_dapm_new_controls(codec, wm8988_dapm_widgets,
834 ARRAY_SIZE(wm8988_dapm_widgets));
835 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
836 snd_soc_dapm_new_widgets(codec);
837
838 ret = snd_soc_init_card(socdev);
839 if (ret < 0) {
840 dev_err(codec->dev, "failed to register card: %d\n", ret);
841 goto card_err;
842 }
843
844 return ret;
845
846card_err:
847 snd_soc_free_pcms(socdev);
848 snd_soc_dapm_free(socdev);
849pcm_err:
850 return ret;
851}
852
/* ASoC platform remove: undo what wm8988_probe() created */
static int wm8988_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);

	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
	return 0;
}
862
/* Codec device operations exported to machine drivers */
struct snd_soc_codec_device soc_codec_dev_wm8988 = {
	.probe = wm8988_probe,
	.remove = wm8988_remove,
	.suspend = wm8988_suspend,
	.resume = wm8988_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8988);
870
871static int wm8988_register(struct wm8988_priv *wm8988)
872{
873 struct snd_soc_codec *codec = &wm8988->codec;
874 int ret;
875 u16 reg;
876
877 if (wm8988_codec) {
878 dev_err(codec->dev, "Another WM8988 is registered\n");
879 ret = -EINVAL;
880 goto err;
881 }
882
883 mutex_init(&codec->mutex);
884 INIT_LIST_HEAD(&codec->dapm_widgets);
885 INIT_LIST_HEAD(&codec->dapm_paths);
886
887 codec->private_data = wm8988;
888 codec->name = "WM8988";
889 codec->owner = THIS_MODULE;
890 codec->read = wm8988_read_reg_cache;
891 codec->write = wm8988_write;
892 codec->dai = &wm8988_dai;
893 codec->num_dai = 1;
894 codec->reg_cache_size = ARRAY_SIZE(wm8988->reg_cache);
895 codec->reg_cache = &wm8988->reg_cache;
896 codec->bias_level = SND_SOC_BIAS_OFF;
897 codec->set_bias_level = wm8988_set_bias_level;
898
899 memcpy(codec->reg_cache, wm8988_reg,
900 sizeof(wm8988_reg));
901
902 ret = wm8988_reset(codec);
903 if (ret < 0) {
904 dev_err(codec->dev, "Failed to issue reset\n");
905 return ret;
906 }
907
908 /* set the update bits (we always update left then right) */
909 reg = wm8988_read_reg_cache(codec, WM8988_RADC);
910 wm8988_write(codec, WM8988_RADC, reg | 0x100);
911 reg = wm8988_read_reg_cache(codec, WM8988_RDAC);
912 wm8988_write(codec, WM8988_RDAC, reg | 0x0100);
913 reg = wm8988_read_reg_cache(codec, WM8988_ROUT1V);
914 wm8988_write(codec, WM8988_ROUT1V, reg | 0x0100);
915 reg = wm8988_read_reg_cache(codec, WM8988_ROUT2V);
916 wm8988_write(codec, WM8988_ROUT2V, reg | 0x0100);
917 reg = wm8988_read_reg_cache(codec, WM8988_RINVOL);
918 wm8988_write(codec, WM8988_RINVOL, reg | 0x0100);
919
920 wm8988_set_bias_level(&wm8988->codec, SND_SOC_BIAS_STANDBY);
921
922 wm8988_dai.dev = codec->dev;
923
924 wm8988_codec = codec;
925
926 ret = snd_soc_register_codec(codec);
927 if (ret != 0) {
928 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
929 return ret;
930 }
931
932 ret = snd_soc_register_dai(&wm8988_dai);
933 if (ret != 0) {
934 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
935 snd_soc_unregister_codec(codec);
936 return ret;
937 }
938
939 return 0;
940
941err:
942 kfree(wm8988);
943 return ret;
944}
945
946static void wm8988_unregister(struct wm8988_priv *wm8988)
947{
948 wm8988_set_bias_level(&wm8988->codec, SND_SOC_BIAS_OFF);
949 snd_soc_unregister_dai(&wm8988_dai);
950 snd_soc_unregister_codec(&wm8988->codec);
951 kfree(wm8988);
952 wm8988_codec = NULL;
953}
954
955#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
956static int wm8988_i2c_probe(struct i2c_client *i2c,
957 const struct i2c_device_id *id)
958{
959 struct wm8988_priv *wm8988;
960 struct snd_soc_codec *codec;
961
962 wm8988 = kzalloc(sizeof(struct wm8988_priv), GFP_KERNEL);
963 if (wm8988 == NULL)
964 return -ENOMEM;
965
966 codec = &wm8988->codec;
967 codec->hw_write = (hw_write_t)i2c_master_send;
968
969 i2c_set_clientdata(i2c, wm8988);
970 codec->control_data = i2c;
971
972 codec->dev = &i2c->dev;
973
974 return wm8988_register(wm8988);
975}
976
/* I2C remove: unregister and free the device state */
static int wm8988_i2c_remove(struct i2c_client *client)
{
	wm8988_unregister(i2c_get_clientdata(client));
	return 0;
}
983
/* I2C IDs this driver binds against */
static const struct i2c_device_id wm8988_i2c_id[] = {
	{ "wm8988", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id);

static struct i2c_driver wm8988_i2c_driver = {
	.driver = {
		.name = "WM8988",
		.owner = THIS_MODULE,
	},
	.probe = wm8988_i2c_probe,
	.remove = wm8988_i2c_remove,
	.id_table = wm8988_i2c_id,
};
999#endif
1000
1001#if defined(CONFIG_SPI_MASTER)
1002static int wm8988_spi_write(struct spi_device *spi, const char *data, int len)
1003{
1004 struct spi_transfer t;
1005 struct spi_message m;
1006 u8 msg[2];
1007
1008 if (len <= 0)
1009 return 0;
1010
1011 msg[0] = data[0];
1012 msg[1] = data[1];
1013
1014 spi_message_init(&m);
1015 memset(&t, 0, (sizeof t));
1016
1017 t.tx_buf = &msg[0];
1018 t.len = len;
1019
1020 spi_message_add_tail(&t, &m);
1021 spi_sync(spi, &m);
1022
1023 return len;
1024}
1025
1026static int __devinit wm8988_spi_probe(struct spi_device *spi)
1027{
1028 struct wm8988_priv *wm8988;
1029 struct snd_soc_codec *codec;
1030
1031 wm8988 = kzalloc(sizeof(struct wm8988_priv), GFP_KERNEL);
1032 if (wm8988 == NULL)
1033 return -ENOMEM;
1034
1035 codec = &wm8988->codec;
1036 codec->hw_write = (hw_write_t)wm8988_spi_write;
1037 codec->control_data = spi;
1038 codec->dev = &spi->dev;
1039
1040 spi->dev.driver_data = wm8988;
1041
1042 return wm8988_register(wm8988);
1043}
1044
1045static int __devexit wm8988_spi_remove(struct spi_device *spi)
1046{
1047 struct wm8988_priv *wm8988 = spi->dev.driver_data;
1048
1049 wm8988_unregister(wm8988);
1050
1051 return 0;
1052}
1053
/* SPI bus glue */
static struct spi_driver wm8988_spi_driver = {
	.driver = {
		.name = "wm8988",
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},
	.probe = wm8988_spi_probe,
	.remove = __devexit_p(wm8988_spi_remove),
};
1063#endif
1064
1065static int __init wm8988_modinit(void)
1066{
1067 int ret;
1068
1069#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
1070 ret = i2c_add_driver(&wm8988_i2c_driver);
1071 if (ret != 0)
1072 pr_err("WM8988: Unable to register I2C driver: %d\n", ret);
1073#endif
1074#if defined(CONFIG_SPI_MASTER)
1075 ret = spi_register_driver(&wm8988_spi_driver);
1076 if (ret != 0)
1077 pr_err("WM8988: Unable to register SPI driver: %d\n", ret);
1078#endif
1079 return ret;
1080}
1081module_init(wm8988_modinit);
1082
/* Module exit: unregister the bus drivers registered at init */
static void __exit wm8988_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8988_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8988_spi_driver);
#endif
}
module_exit(wm8988_exit);
1093
1094
/* Module metadata */
MODULE_DESCRIPTION("ASoC WM8988 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8988.h b/sound/soc/codecs/wm8988.h
new file mode 100644
index 000000000000..4552d37fdd41
--- /dev/null
+++ b/sound/soc/codecs/wm8988.h
@@ -0,0 +1,60 @@
/*
 * Copyright 2005 Openedhand Ltd.
 *
 * Author: Richard Purdie <richard@openedhand.com>
 *
 * Based on WM8753.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef _WM8988_H
#define _WM8988_H

/* WM8988 register space — gaps in the numbering (0x04, 0x06, 0x09,
 * 0x0e, …) are presumably reserved addresses; confirm against the
 * datasheet. */

#define WM8988_LINVOL    0x00
#define WM8988_RINVOL    0x01
#define WM8988_LOUT1V    0x02
#define WM8988_ROUT1V    0x03
#define WM8988_ADCDAC    0x05
#define WM8988_IFACE     0x07
#define WM8988_SRATE     0x08
#define WM8988_LDAC      0x0a
#define WM8988_RDAC      0x0b
#define WM8988_BASS      0x0c
#define WM8988_TREBLE    0x0d
/* Writing WM8988_RESET resets the chip (see wm8988_reset()) */
#define WM8988_RESET     0x0f
#define WM8988_3D        0x10
#define WM8988_ALC1      0x11
#define WM8988_ALC2      0x12
#define WM8988_ALC3      0x13
#define WM8988_NGATE     0x14
#define WM8988_LADC      0x15
#define WM8988_RADC      0x16
#define WM8988_ADCTL1    0x17
#define WM8988_ADCTL2    0x18
#define WM8988_PWR1      0x19
#define WM8988_PWR2      0x1a
#define WM8988_ADCTL3    0x1b
#define WM8988_ADCIN     0x1f
#define WM8988_LADCIN    0x20
#define WM8988_RADCIN    0x21
#define WM8988_LOUTM1    0x22
#define WM8988_LOUTM2    0x23
#define WM8988_ROUTM1    0x24
#define WM8988_ROUTM2    0x25
#define WM8988_LOUT2V    0x28
#define WM8988_ROUT2V    0x29
#define WM8988_LPPB      0x43
/* One past the highest register address, used to size the cache */
#define WM8988_NUM_REG   0x44

#define WM8988_SYSCLK	0

extern struct snd_soc_dai wm8988_dai;
extern struct snd_soc_codec_device soc_codec_dev_wm8988;

#endif /* _WM8988_H */
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 40cd274eb1ef..d029818350e9 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -998,7 +998,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int target,
998 998
999 if ((Ndiv < 6) || (Ndiv > 12)) 999 if ((Ndiv < 6) || (Ndiv > 12))
1000 printk(KERN_WARNING 1000 printk(KERN_WARNING
1001 "WM8990 N value outwith recommended range! N = %d\n", Ndiv); 1001 "WM8990 N value outwith recommended range! N = %u\n", Ndiv);
1002 1002
1003 pll_div->n = Ndiv; 1003 pll_div->n = Ndiv;
1004 Nmod = target % source; 1004 Nmod = target % source;
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
new file mode 100644
index 000000000000..86fc57e25f97
--- /dev/null
+++ b/sound/soc/codecs/wm9081.c
@@ -0,0 +1,1534 @@
1/*
2 * wm9081.c -- WM9081 ALSA SoC Audio driver
3 *
4 * Author: Mark Brown
5 *
6 * Copyright 2009 Wolfson Microelectronics plc
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <sound/core.h>
22#include <sound/pcm.h>
23#include <sound/pcm_params.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26#include <sound/initval.h>
27#include <sound/tlv.h>
28
29#include <sound/wm9081.h>
30#include "wm9081.h"
31
/* Power-on register defaults; the array index is the register address
 * (used to seed the register cache in wm9081_priv). */
static u16 wm9081_reg_defaults[] = {
	0x0000,     /* R0  - Software Reset */
	0x0000,     /* R1 */
	0x00B9,     /* R2  - Analogue Lineout */
	0x00B9,     /* R3  - Analogue Speaker PGA */
	0x0001,     /* R4  - VMID Control */
	0x0068,     /* R5  - Bias Control 1 */
	0x0000,     /* R6 */
	0x0000,     /* R7  - Analogue Mixer */
	0x0000,     /* R8  - Anti Pop Control */
	0x01DB,     /* R9  - Analogue Speaker 1 */
	0x0018,     /* R10 - Analogue Speaker 2 */
	0x0180,     /* R11 - Power Management */
	0x0000,     /* R12 - Clock Control 1 */
	0x0038,     /* R13 - Clock Control 2 */
	0x4000,     /* R14 - Clock Control 3 */
	0x0000,     /* R15 */
	0x0000,     /* R16 - FLL Control 1 */
	0x0200,     /* R17 - FLL Control 2 */
	0x0000,     /* R18 - FLL Control 3 */
	0x0204,     /* R19 - FLL Control 4 */
	0x0000,     /* R20 - FLL Control 5 */
	0x0000,     /* R21 */
	0x0000,     /* R22 - Audio Interface 1 */
	0x0002,     /* R23 - Audio Interface 2 */
	0x0008,     /* R24 - Audio Interface 3 */
	0x0022,     /* R25 - Audio Interface 4 */
	0x0000,     /* R26 - Interrupt Status */
	0x0006,     /* R27 - Interrupt Status Mask */
	0x0000,     /* R28 - Interrupt Polarity */
	0x0000,     /* R29 - Interrupt Control */
	0x00C0,     /* R30 - DAC Digital 1 */
	0x0008,     /* R31 - DAC Digital 2 */
	0x09AF,     /* R32 - DRC 1 */
	0x4201,     /* R33 - DRC 2 */
	0x0000,     /* R34 - DRC 3 */
	0x0000,     /* R35 - DRC 4 */
	0x0000,     /* R36 */
	0x0000,     /* R37 */
	0x0000,     /* R38 - Write Sequencer 1 */
	0x0000,     /* R39 - Write Sequencer 2 */
	0x0002,     /* R40 - MW Slave 1 */
	0x0000,     /* R41 */
	0x0000,     /* R42 - EQ 1 */
	0x0000,     /* R43 - EQ 2 */
	0x0FCA,     /* R44 - EQ 3 */
	0x0400,     /* R45 - EQ 4 */
	0x00B8,     /* R46 - EQ 5 */
	0x1EB5,     /* R47 - EQ 6 */
	0xF145,     /* R48 - EQ 7 */
	0x0B75,     /* R49 - EQ 8 */
	0x01C5,     /* R50 - EQ 9 */
	0x169E,     /* R51 - EQ 10 */
	0xF829,     /* R52 - EQ 11 */
	0x07AD,     /* R53 - EQ 12 */
	0x1103,     /* R54 - EQ 13 */
	0x1C58,     /* R55 - EQ 14 */
	0xF373,     /* R56 - EQ 15 */
	0x0A54,     /* R57 - EQ 16 */
	0x0558,     /* R58 - EQ 17 */
	0x0564,     /* R59 - EQ 18 */
	0x0559,     /* R60 - EQ 19 */
	0x4000,     /* R61 - EQ 20 */
};
96
/* SYSCLK/fs ratio -> CLK_SYS_RATE register field value */
static struct {
	int ratio;
	int clk_sys_rate;
} clk_sys_rates[] = {
	{ 64,   0 },
	{ 128,  1 },
	{ 192,  2 },
	{ 256,  3 },
	{ 384,  4 },
	{ 512,  5 },
	{ 768,  6 },
	{ 1024, 7 },
	{ 1408, 8 },
	{ 1536, 9 },
};

/* Sample rate in Hz -> SAMPLE_RATE register field value */
static struct {
	int rate;
	int sample_rate;
} sample_rates[] = {
	{ 8000,  0  },
	{ 11025, 1  },
	{ 12000, 2  },
	{ 16000, 3  },
	{ 22050, 4  },
	{ 24000, 5  },
	{ 32000, 6  },
	{ 44100, 7  },
	{ 48000, 8  },
	{ 88200, 9  },
	{ 96000, 10 },
};

/* BCLK divider -> BCLK_DIV register field value */
static struct {
	int div; /* *10 due to .5s */
	int bclk_div;
} bclk_divs[] = {
	{ 10,  0  },
	{ 15,  1  },
	{ 20,  2  },
	{ 30,  3  },
	{ 40,  4  },
	{ 50,  5  },
	{ 55,  6  },
	{ 60,  7  },
	{ 80,  8  },
	{ 100, 9  },
	{ 110, 10 },
	{ 120, 11 },
	{ 160, 12 },
	{ 200, 13 },
	{ 220, 14 },
	{ 240, 15 },
	{ 250, 16 },
	{ 300, 17 },
	{ 320, 18 },
	{ 440, 19 },
	{ 480, 20 },
};
156
/* Per-device driver state */
struct wm9081_priv {
	struct snd_soc_codec codec;	/* embedded ASoC codec instance */
	u16 reg_cache[WM9081_MAX_REGISTER + 1];	/* shadow of chip registers */
	int sysclk_source;	/* selected SYSCLK source */
	int mclk_rate;		/* incoming MCLK rate (presumably Hz) */
	int sysclk_rate;	/* resulting CLK_SYS rate (presumably Hz) */
	int fs;			/* current sample rate */
	int bclk;		/* target BCLK rate */
	int master;		/* non-zero when codec is interface master —
				 * TODO confirm against the DAI fmt handler */
	int fll_fref;		/* last configured FLL input frequency */
	int fll_fout;		/* last configured FLL output frequency */
	struct wm9081_retune_mobile_config *retune;	/* ReTune Mobile EQ
				 * config (presumably from platform data) */
};
170
/* Return non-zero for registers that must always be read from the
 * hardware.  Currently every register is cacheable, so this always
 * returns 0; the hook exists so volatile registers can be added later. */
static int wm9081_reg_is_volatile(int reg)
{
	return 0;
}
178
179static unsigned int wm9081_read_reg_cache(struct snd_soc_codec *codec,
180 unsigned int reg)
181{
182 u16 *cache = codec->reg_cache;
183 BUG_ON(reg > WM9081_MAX_REGISTER);
184 return cache[reg];
185}
186
187static unsigned int wm9081_read_hw(struct snd_soc_codec *codec, u8 reg)
188{
189 struct i2c_msg xfer[2];
190 u16 data;
191 int ret;
192 struct i2c_client *client = codec->control_data;
193
194 BUG_ON(reg > WM9081_MAX_REGISTER);
195
196 /* Write register */
197 xfer[0].addr = client->addr;
198 xfer[0].flags = 0;
199 xfer[0].len = 1;
200 xfer[0].buf = &reg;
201
202 /* Read data */
203 xfer[1].addr = client->addr;
204 xfer[1].flags = I2C_M_RD;
205 xfer[1].len = 2;
206 xfer[1].buf = (u8 *)&data;
207
208 ret = i2c_transfer(client->adapter, xfer, 2);
209 if (ret != 2) {
210 dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
211 return 0;
212 }
213
214 return (data >> 8) | ((data & 0xff) << 8);
215}
216
/* Register read dispatch: volatile registers come from the hardware,
 * everything else from the cache. */
static unsigned int wm9081_read(struct snd_soc_codec *codec, unsigned int reg)
{
	return wm9081_reg_is_volatile(reg) ?
		wm9081_read_hw(codec, reg) :
		wm9081_read_reg_cache(codec, reg);
}
224
225static int wm9081_write(struct snd_soc_codec *codec, unsigned int reg,
226 unsigned int value)
227{
228 u16 *cache = codec->reg_cache;
229 u8 data[3];
230
231 BUG_ON(reg > WM9081_MAX_REGISTER);
232
233 if (!wm9081_reg_is_volatile(reg))
234 cache[reg] = value;
235
236 data[0] = reg;
237 data[1] = value >> 8;
238 data[2] = value & 0x00ff;
239
240 if (codec->hw_write(codec->control_data, data, 3) == 3)
241 return 0;
242 else
243 return -EIO;
244}
245
/* Issue a software reset, returning all registers to their defaults. */
static int wm9081_reset(struct snd_soc_codec *codec)
{
	return wm9081_write(codec, WM9081_SOFTWARE_RESET, 0);
}
250
251static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
252static const DECLARE_TLV_DB_SCALE(drc_out_tlv, -2250, 75, 0);
253static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
254static unsigned int drc_max_tlv[] = {
255 TLV_DB_RANGE_HEAD(4),
256 0, 0, TLV_DB_SCALE_ITEM(1200, 0, 0),
257 1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
258 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
259 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
260};
261static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
262static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -300, 50, 0);
263
264static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
265
266static const DECLARE_TLV_DB_SCALE(in_tlv, -600, 600, 0);
267static const DECLARE_TLV_DB_SCALE(dac_tlv, -7200, 75, 1);
268static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
269
/* Enumerated control value texts; each soc_enum's item count must
 * match the size of its text array. */
static const char *drc_high_text[] = {
	"1",
	"1/2",
	"1/4",
	"1/8",
	"1/16",
	"0",
};

static const struct soc_enum drc_high =
	SOC_ENUM_SINGLE(WM9081_DRC_3, 3, 6, drc_high_text);

static const char *drc_low_text[] = {
	"1",
	"1/2",
	"1/4",
	"1/8",
	"0",
};

static const struct soc_enum drc_low =
	SOC_ENUM_SINGLE(WM9081_DRC_3, 0, 5, drc_low_text);

static const char *drc_atk_text[] = {
	"181us",
	"181us",	/* NOTE(review): first two register values appear
			 * to map to the same attack time - confirm
			 * against the datasheet. */
	"363us",
	"726us",
	"1.45ms",
	"2.9ms",
	"5.8ms",
	"11.6ms",
	"23.2ms",
	"46.4ms",
	"92.8ms",
	"185.6ms",
};

static const struct soc_enum drc_atk =
	SOC_ENUM_SINGLE(WM9081_DRC_2, 12, 12, drc_atk_text);

static const char *drc_dcy_text[] = {
	"186ms",
	"372ms",
	"743ms",
	"1.49s",
	"2.97s",
	"5.94s",
	"11.89s",
	"23.78s",
	"47.56s",
};

static const struct soc_enum drc_dcy =
	SOC_ENUM_SINGLE(WM9081_DRC_2, 8, 9, drc_dcy_text);

static const char *drc_qr_dcy_text[] = {
	"0.725ms",
	"1.45ms",
	"5.8ms",
};

static const struct soc_enum drc_qr_dcy =
	SOC_ENUM_SINGLE(WM9081_DRC_2, 4, 3, drc_qr_dcy_text);

static const char *dac_deemph_text[] = {
	"None",
	"32kHz",
	"44.1kHz",
	"48kHz",
};

static const struct soc_enum dac_deemph =
	SOC_ENUM_SINGLE(WM9081_DAC_DIGITAL_2, 1, 4, dac_deemph_text);

static const char *speaker_mode_text[] = {
	"Class D",
	"Class AB",
};

static const struct soc_enum speaker_mode =
	SOC_ENUM_SINGLE(WM9081_ANALOGUE_SPEAKER_2, 6, 2, speaker_mode_text);
352
353static int speaker_mode_get(struct snd_kcontrol *kcontrol,
354 struct snd_ctl_elem_value *ucontrol)
355{
356 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
357 unsigned int reg;
358
359 reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
360 if (reg & WM9081_SPK_MODE)
361 ucontrol->value.integer.value[0] = 1;
362 else
363 ucontrol->value.integer.value[0] = 0;
364
365 return 0;
366}
367
368/*
369 * Stop any attempts to change speaker mode while the speaker is enabled.
370 *
371 * We also have some special anti-pop controls dependant on speaker
372 * mode which must be changed along with the mode.
373 */
374static int speaker_mode_put(struct snd_kcontrol *kcontrol,
375 struct snd_ctl_elem_value *ucontrol)
376{
377 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
378 unsigned int reg_pwr = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
379 unsigned int reg2 = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
380
381 /* Are we changing anything? */
382 if (ucontrol->value.integer.value[0] ==
383 ((reg2 & WM9081_SPK_MODE) != 0))
384 return 0;
385
386 /* Don't try to change modes while enabled */
387 if (reg_pwr & WM9081_SPK_ENA)
388 return -EINVAL;
389
390 if (ucontrol->value.integer.value[0]) {
391 /* Class AB */
392 reg2 &= ~(WM9081_SPK_INV_MUTE | WM9081_OUT_SPK_CTRL);
393 reg2 |= WM9081_SPK_MODE;
394 } else {
395 /* Class D */
396 reg2 |= WM9081_SPK_INV_MUTE | WM9081_OUT_SPK_CTRL;
397 reg2 &= ~WM9081_SPK_MODE;
398 }
399
400 wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2);
401
402 return 0;
403}
404
/* ALSA mixer controls always registered for the device. */
static const struct snd_kcontrol_new wm9081_snd_controls[] = {
SOC_SINGLE_TLV("IN1 Volume", WM9081_ANALOGUE_MIXER, 1, 1, 1, in_tlv),
SOC_SINGLE_TLV("IN2 Volume", WM9081_ANALOGUE_MIXER, 3, 1, 1, in_tlv),

SOC_SINGLE_TLV("Playback Volume", WM9081_DAC_DIGITAL_1, 1, 96, 0, dac_tlv),

SOC_SINGLE("LINEOUT Switch", WM9081_ANALOGUE_LINEOUT, 7, 1, 1),
SOC_SINGLE("LINEOUT ZC Switch", WM9081_ANALOGUE_LINEOUT, 6, 1, 0),
SOC_SINGLE_TLV("LINEOUT Volume", WM9081_ANALOGUE_LINEOUT, 0, 63, 0, out_tlv),

SOC_SINGLE("DRC Switch", WM9081_DRC_1, 15, 1, 0),
SOC_ENUM("DRC High Slope", drc_high),
SOC_ENUM("DRC Low Slope", drc_low),
SOC_SINGLE_TLV("DRC Input Volume", WM9081_DRC_4, 5, 60, 1, drc_in_tlv),
SOC_SINGLE_TLV("DRC Output Volume", WM9081_DRC_4, 0, 30, 1, drc_out_tlv),
SOC_SINGLE_TLV("DRC Minimum Volume", WM9081_DRC_2, 2, 3, 1, drc_min_tlv),
SOC_SINGLE_TLV("DRC Maximum Volume", WM9081_DRC_2, 0, 3, 0, drc_max_tlv),
SOC_ENUM("DRC Attack", drc_atk),
SOC_ENUM("DRC Decay", drc_dcy),
SOC_SINGLE("DRC Quick Release Switch", WM9081_DRC_1, 2, 1, 0),
SOC_SINGLE_TLV("DRC Quick Release Volume", WM9081_DRC_2, 6, 3, 0, drc_qr_tlv),
SOC_ENUM("DRC Quick Release Decay", drc_qr_dcy),
SOC_SINGLE_TLV("DRC Startup Volume", WM9081_DRC_1, 6, 18, 0, drc_startup_tlv),

SOC_SINGLE("EQ Switch", WM9081_EQ_1, 0, 1, 0),

SOC_SINGLE("Speaker DC Volume", WM9081_ANALOGUE_SPEAKER_1, 3, 5, 0),
SOC_SINGLE("Speaker AC Volume", WM9081_ANALOGUE_SPEAKER_1, 0, 5, 0),
SOC_SINGLE("Speaker Switch", WM9081_ANALOGUE_SPEAKER_PGA, 7, 1, 1),
SOC_SINGLE("Speaker ZC Switch", WM9081_ANALOGUE_SPEAKER_PGA, 6, 1, 0),
SOC_SINGLE_TLV("Speaker Volume", WM9081_ANALOGUE_SPEAKER_PGA, 0, 63, 0,
	       out_tlv),
SOC_ENUM("DAC Deemphasis", dac_deemph),
/* Speaker mode needs custom handlers for the anti-pop sequencing */
SOC_ENUM_EXT("Speaker Mode", speaker_mode, speaker_mode_get, speaker_mode_put),
};
440
/* Direct EQ band gain controls; only registered when no ReTune Mobile
 * configuration was supplied (see wm9081_probe()). */
static const struct snd_kcontrol_new wm9081_eq_controls[] = {
SOC_SINGLE_TLV("EQ1 Volume", WM9081_EQ_1, 11, 24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Volume", WM9081_EQ_1, 6, 24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 Volume", WM9081_EQ_1, 1, 24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Volume", WM9081_EQ_2, 11, 24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ5 Volume", WM9081_EQ_2, 6, 24, 0, eq_tlv),
};
448
/* Input selection switches for the analogue output mixer widget. */
static const struct snd_kcontrol_new mixer[] = {
SOC_DAPM_SINGLE("IN1 Switch", WM9081_ANALOGUE_MIXER, 0, 1, 0),
SOC_DAPM_SINGLE("IN2 Switch", WM9081_ANALOGUE_MIXER, 2, 1, 0),
SOC_DAPM_SINGLE("Playback Switch", WM9081_ANALOGUE_MIXER, 4, 1, 0),
};
454
455static int speaker_event(struct snd_soc_dapm_widget *w,
456 struct snd_kcontrol *kcontrol, int event)
457{
458 struct snd_soc_codec *codec = w->codec;
459 unsigned int reg = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
460
461 switch (event) {
462 case SND_SOC_DAPM_POST_PMU:
463 reg |= WM9081_SPK_ENA;
464 break;
465
466 case SND_SOC_DAPM_PRE_PMD:
467 reg &= ~WM9081_SPK_ENA;
468 break;
469 }
470
471 wm9081_write(codec, WM9081_POWER_MANAGEMENT, reg);
472
473 return 0;
474}
475
/* FLL register settings calculated by fll_factors(). */
struct _fll_div {
	u16 fll_fratio;		/* reference ratio selector */
	u16 fll_outdiv;		/* output divider selector */
	u16 fll_clk_ref_div;	/* reference pre-divider (div / 2) */
	u16 n;			/* integer part of the N.K multiplier */
	u16 k;			/* fractional part of the N.K multiplier */
};
483
/* The size in bits of the FLL divide multiplied by 10
 * to allow rounding later */
#define FIXED_FLL_SIZE ((1 << 16) * 10)

/* Mapping from the (pre-divided) reference frequency range in Hz to
 * the FLL_FRATIO register value; ratio is the division that FRATIO
 * setting applies, factored out of the target in fll_factors(). */
static struct {
	unsigned int min;
	unsigned int max;
	u16 fll_fratio;
	int ratio;
} fll_fratios[] = {
	{ 0, 64000, 4, 16 },
	{ 64000, 128000, 3, 8 },
	{ 128000, 256000, 2, 4 },
	{ 256000, 1000000, 1, 2 },
	{ 1000000, 13500000, 0, 1 },
};
500
501static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
502 unsigned int Fout)
503{
504 u64 Kpart;
505 unsigned int K, Ndiv, Nmod, target;
506 unsigned int div;
507 int i;
508
509 /* Fref must be <=13.5MHz */
510 div = 1;
511 while ((Fref / div) > 13500000) {
512 div *= 2;
513
514 if (div > 8) {
515 pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
516 Fref);
517 return -EINVAL;
518 }
519 }
520 fll_div->fll_clk_ref_div = div / 2;
521
522 pr_debug("Fref=%u Fout=%u\n", Fref, Fout);
523
524 /* Apply the division for our remaining calculations */
525 Fref /= div;
526
527 /* Fvco should be 90-100MHz; don't check the upper bound */
528 div = 0;
529 target = Fout * 2;
530 while (target < 90000000) {
531 div++;
532 target *= 2;
533 if (div > 7) {
534 pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
535 Fout);
536 return -EINVAL;
537 }
538 }
539 fll_div->fll_outdiv = div;
540
541 pr_debug("Fvco=%dHz\n", target);
542
543 /* Find an appropraite FLL_FRATIO and factor it out of the target */
544 for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
545 if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
546 fll_div->fll_fratio = fll_fratios[i].fll_fratio;
547 target /= fll_fratios[i].ratio;
548 break;
549 }
550 }
551 if (i == ARRAY_SIZE(fll_fratios)) {
552 pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
553 return -EINVAL;
554 }
555
556 /* Now, calculate N.K */
557 Ndiv = target / Fref;
558
559 fll_div->n = Ndiv;
560 Nmod = target % Fref;
561 pr_debug("Nmod=%d\n", Nmod);
562
563 /* Calculate fractional part - scale up so we can round. */
564 Kpart = FIXED_FLL_SIZE * (long long)Nmod;
565
566 do_div(Kpart, Fref);
567
568 K = Kpart & 0xFFFFFFFF;
569
570 if ((K % 10) >= 5)
571 K += 5;
572
573 /* Move down to proper range now rounding is done */
574 fll_div->k = K / 10;
575
576 pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n",
577 fll_div->n, fll_div->k,
578 fll_div->fll_fratio, fll_div->fll_outdiv,
579 fll_div->fll_clk_ref_div);
580
581 return 0;
582}
583
/*
 * Configure the FLL to generate Fout from Fref, or disable it when
 * Fout == 0.  CLK_SYS is gated while the FLL is reprogrammed and
 * restored afterwards; the FLL itself must be disabled before any
 * configuration change, so the write ordering below is significant.
 *
 * Returns 0 on success (including no-op calls) or a negative errno.
 */
static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
			  unsigned int Fref, unsigned int Fout)
{
	struct wm9081_priv *wm9081 = codec->private_data;
	u16 reg1, reg4, reg5;
	struct _fll_div fll_div;
	int ret;
	int clk_sys_reg;

	/* Any change? */
	if (Fref == wm9081->fll_fref && Fout == wm9081->fll_fout)
		return 0;

	/* Disable the FLL */
	if (Fout == 0) {
		dev_dbg(codec->dev, "FLL disabled\n");
		wm9081->fll_fref = 0;
		wm9081->fll_fout = 0;

		return 0;
	}

	ret = fll_factors(&fll_div, Fref, Fout);
	if (ret != 0)
		return ret;

	/* Select the FLL reference clock source */
	reg5 = wm9081_read(codec, WM9081_FLL_CONTROL_5);
	reg5 &= ~WM9081_FLL_CLK_SRC_MASK;

	switch (fll_id) {
	case WM9081_SYSCLK_FLL_MCLK:
		reg5 |= 0x1;
		break;

	default:
		dev_err(codec->dev, "Unknown FLL ID %d\n", fll_id);
		return -EINVAL;
	}

	/* Disable CLK_SYS while we reconfigure */
	clk_sys_reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
	if (clk_sys_reg & WM9081_CLK_SYS_ENA)
		wm9081_write(codec, WM9081_CLOCK_CONTROL_3,
			     clk_sys_reg & ~WM9081_CLK_SYS_ENA);

	/* Any FLL configuration change requires that the FLL be
	 * disabled first. */
	reg1 = wm9081_read(codec, WM9081_FLL_CONTROL_1);
	reg1 &= ~WM9081_FLL_ENA;
	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);

	/* Apply the configuration */
	if (fll_div.k)
		reg1 |= WM9081_FLL_FRAC_MASK;
	else
		reg1 &= ~WM9081_FLL_FRAC_MASK;
	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);

	wm9081_write(codec, WM9081_FLL_CONTROL_2,
		     (fll_div.fll_outdiv << WM9081_FLL_OUTDIV_SHIFT) |
		     (fll_div.fll_fratio << WM9081_FLL_FRATIO_SHIFT));
	wm9081_write(codec, WM9081_FLL_CONTROL_3, fll_div.k);

	reg4 = wm9081_read(codec, WM9081_FLL_CONTROL_4);
	reg4 &= ~WM9081_FLL_N_MASK;
	reg4 |= fll_div.n << WM9081_FLL_N_SHIFT;
	wm9081_write(codec, WM9081_FLL_CONTROL_4, reg4);

	reg5 &= ~WM9081_FLL_CLK_REF_DIV_MASK;
	reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
	wm9081_write(codec, WM9081_FLL_CONTROL_5, reg5);

	/* Enable the FLL */
	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);

	/* Then bring CLK_SYS up again if it was disabled */
	if (clk_sys_reg & WM9081_CLK_SYS_ENA)
		wm9081_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg);

	dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);

	wm9081->fll_fref = Fref;
	wm9081->fll_fout = Fout;

	return 0;
}
670
/*
 * Set up CLK_SYS from the configured clock source, using the FLL when
 * requested and a suitable rate can be generated.  Updates
 * wm9081->sysclk_rate with the rate actually achieved.
 */
static int configure_clock(struct snd_soc_codec *codec)
{
	struct wm9081_priv *wm9081 = codec->private_data;
	int new_sysclk, i, target;
	unsigned int reg;
	int ret = 0;
	int mclkdiv = 0;
	int fll = 0;

	switch (wm9081->sysclk_source) {
	case WM9081_SYSCLK_MCLK:
		/* Direct MCLK: halve it when it is too fast for CLK_SYS.
		 * NOTE(review): 12225000 threshold - presumably just
		 * under a 12.288MHz limit; confirm against datasheet. */
		if (wm9081->mclk_rate > 12225000) {
			mclkdiv = 1;
			wm9081->sysclk_rate = wm9081->mclk_rate / 2;
		} else {
			wm9081->sysclk_rate = wm9081->mclk_rate;
		}
		wm9081_set_fll(codec, WM9081_SYSCLK_FLL_MCLK, 0, 0);
		break;

	case WM9081_SYSCLK_FLL_MCLK:
		/* If we have a sample rate calculate a CLK_SYS that
		 * gives us a suitable DAC configuration, plus BCLK.
		 * Ideally we would check to see if we can clock
		 * directly from MCLK and only use the FLL if this is
		 * not the case, though care must be taken with free
		 * running mode.
		 */
		if (wm9081->master && wm9081->bclk) {
			/* Make sure we can generate CLK_SYS and BCLK
			 * and that we've got 3MHz for optimal
			 * performance. */
			for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) {
				target = wm9081->fs * clk_sys_rates[i].ratio;
				new_sysclk = target;
				if (target >= wm9081->bclk &&
				    target > 3000000)
					break;
			}
		} else if (wm9081->fs) {
			for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) {
				new_sysclk = clk_sys_rates[i].ratio
					* wm9081->fs;
				if (new_sysclk > 3000000)
					break;
			}
		} else {
			/* No sample rate yet: pick a common default */
			new_sysclk = 12288000;
		}

		ret = wm9081_set_fll(codec, WM9081_SYSCLK_FLL_MCLK,
				     wm9081->mclk_rate, new_sysclk);
		if (ret == 0) {
			wm9081->sysclk_rate = new_sysclk;

			/* Switch SYSCLK over to FLL */
			fll = 1;
		} else {
			/* FLL setup failed: fall back to raw MCLK */
			wm9081->sysclk_rate = wm9081->mclk_rate;
		}
		break;

	default:
		return -EINVAL;
	}

	reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_1);
	if (mclkdiv)
		reg |= WM9081_MCLKDIV2;
	else
		reg &= ~WM9081_MCLKDIV2;
	wm9081_write(codec, WM9081_CLOCK_CONTROL_1, reg);

	reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
	if (fll)
		reg |= WM9081_CLK_SRC_SEL;
	else
		reg &= ~WM9081_CLK_SRC_SEL;
	wm9081_write(codec, WM9081_CLOCK_CONTROL_3, reg);

	dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm9081->sysclk_rate);

	return ret;
}
755
756static int clk_sys_event(struct snd_soc_dapm_widget *w,
757 struct snd_kcontrol *kcontrol, int event)
758{
759 struct snd_soc_codec *codec = w->codec;
760 struct wm9081_priv *wm9081 = codec->private_data;
761
762 /* This should be done on init() for bypass paths */
763 switch (wm9081->sysclk_source) {
764 case WM9081_SYSCLK_MCLK:
765 dev_dbg(codec->dev, "Using %dHz MCLK\n", wm9081->mclk_rate);
766 break;
767 case WM9081_SYSCLK_FLL_MCLK:
768 dev_dbg(codec->dev, "Using %dHz MCLK with FLL\n",
769 wm9081->mclk_rate);
770 break;
771 default:
772 dev_err(codec->dev, "System clock not configured\n");
773 return -EINVAL;
774 }
775
776 switch (event) {
777 case SND_SOC_DAPM_PRE_PMU:
778 configure_clock(codec);
779 break;
780
781 case SND_SOC_DAPM_POST_PMD:
782 /* Disable the FLL if it's running */
783 wm9081_set_fll(codec, 0, 0, 0);
784 break;
785 }
786
787 return 0;
788}
789
/* DAPM widgets: two analogue inputs and the DAC feed a mixer which
 * drives the line output PGA and the speaker PGA. */
static const struct snd_soc_dapm_widget wm9081_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN1"),
SND_SOC_DAPM_INPUT("IN2"),

SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM9081_POWER_MANAGEMENT, 0, 0),

SND_SOC_DAPM_MIXER_NAMED_CTL("Mixer", SND_SOC_NOPM, 0, 0,
			     mixer, ARRAY_SIZE(mixer)),

SND_SOC_DAPM_PGA("LINEOUT PGA", WM9081_POWER_MANAGEMENT, 4, 0, NULL, 0),

/* SPK_ENA itself is sequenced by speaker_event() */
SND_SOC_DAPM_PGA_E("Speaker PGA", WM9081_POWER_MANAGEMENT, 2, 0, NULL, 0,
		   speaker_event,
		   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),

SND_SOC_DAPM_OUTPUT("LINEOUT"),
SND_SOC_DAPM_OUTPUT("SPKN"),
SND_SOC_DAPM_OUTPUT("SPKP"),

/* clk_sys_event() reprograms the clock tree around CLK_SYS power changes */
SND_SOC_DAPM_SUPPLY("CLK_SYS", WM9081_CLOCK_CONTROL_3, 0, 0, clk_sys_event,
		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("CLK_DSP", WM9081_CLOCK_CONTROL_3, 1, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("TOCLK", WM9081_CLOCK_CONTROL_3, 2, 0, NULL, 0),
};
814
815
/* Audio routing, including the clock supplies each path depends on. */
static const struct snd_soc_dapm_route audio_paths[] = {
	{ "DAC", NULL, "CLK_SYS" },
	{ "DAC", NULL, "CLK_DSP" },

	{ "Mixer", "IN1 Switch", "IN1" },
	{ "Mixer", "IN2 Switch", "IN2" },
	{ "Mixer", "Playback Switch", "DAC" },

	{ "LINEOUT PGA", NULL, "Mixer" },
	{ "LINEOUT PGA", NULL, "TOCLK" },
	{ "LINEOUT PGA", NULL, "CLK_SYS" },

	{ "LINEOUT", NULL, "LINEOUT PGA" },

	{ "Speaker PGA", NULL, "Mixer" },
	{ "Speaker PGA", NULL, "TOCLK" },
	{ "Speaker PGA", NULL, "CLK_SYS" },

	{ "SPKN", NULL, "Speaker PGA" },
	{ "SPKP", NULL, "Speaker PGA" },
};
837
838static int wm9081_set_bias_level(struct snd_soc_codec *codec,
839 enum snd_soc_bias_level level)
840{
841 u16 reg;
842
843 switch (level) {
844 case SND_SOC_BIAS_ON:
845 break;
846
847 case SND_SOC_BIAS_PREPARE:
848 /* VMID=2*40k */
849 reg = wm9081_read(codec, WM9081_VMID_CONTROL);
850 reg &= ~WM9081_VMID_SEL_MASK;
851 reg |= 0x2;
852 wm9081_write(codec, WM9081_VMID_CONTROL, reg);
853
854 /* Normal bias current */
855 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
856 reg &= ~WM9081_STBY_BIAS_ENA;
857 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
858 break;
859
860 case SND_SOC_BIAS_STANDBY:
861 /* Initial cold start */
862 if (codec->bias_level == SND_SOC_BIAS_OFF) {
863 /* Disable LINEOUT discharge */
864 reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL);
865 reg &= ~WM9081_LINEOUT_DISCH;
866 wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg);
867
868 /* Select startup bias source */
869 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
870 reg |= WM9081_BIAS_SRC | WM9081_BIAS_ENA;
871 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
872
873 /* VMID 2*4k; Soft VMID ramp enable */
874 reg = wm9081_read(codec, WM9081_VMID_CONTROL);
875 reg |= WM9081_VMID_RAMP | 0x6;
876 wm9081_write(codec, WM9081_VMID_CONTROL, reg);
877
878 mdelay(100);
879
880 /* Normal bias enable & soft start off */
881 reg |= WM9081_BIAS_ENA;
882 reg &= ~WM9081_VMID_RAMP;
883 wm9081_write(codec, WM9081_VMID_CONTROL, reg);
884
885 /* Standard bias source */
886 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
887 reg &= ~WM9081_BIAS_SRC;
888 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
889 }
890
891 /* VMID 2*240k */
892 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
893 reg &= ~WM9081_VMID_SEL_MASK;
894 reg |= 0x40;
895 wm9081_write(codec, WM9081_VMID_CONTROL, reg);
896
897 /* Standby bias current on */
898 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
899 reg |= WM9081_STBY_BIAS_ENA;
900 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
901 break;
902
903 case SND_SOC_BIAS_OFF:
904 /* Startup bias source */
905 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
906 reg |= WM9081_BIAS_SRC;
907 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
908
909 /* Disable VMID and biases with soft ramping */
910 reg = wm9081_read(codec, WM9081_VMID_CONTROL);
911 reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
912 reg |= WM9081_VMID_RAMP;
913 wm9081_write(codec, WM9081_VMID_CONTROL, reg);
914
915 /* Actively discharge LINEOUT */
916 reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL);
917 reg |= WM9081_LINEOUT_DISCH;
918 wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg);
919 break;
920 }
921
922 codec->bias_level = level;
923
924 return 0;
925}
926
/*
 * ALSA set_fmt callback: program master/slave clock directions, the
 * interface format and clock inversions into AUDIO_INTERFACE_2, and
 * record whether the codec is clock master for configure_clock().
 */
static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
			      unsigned int fmt)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm9081_priv *wm9081 = codec->private_data;
	unsigned int aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2);

	aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV |
		  WM9081_BCLK_DIR | WM9081_LRCLK_DIR | WM9081_AIF_FMT_MASK);

	/* Clock direction: master when we drive BCLK and/or LRCLK */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		wm9081->master = 0;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		aif2 |= WM9081_LRCLK_DIR;
		wm9081->master = 1;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		aif2 |= WM9081_BCLK_DIR;
		wm9081->master = 1;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		aif2 |= WM9081_LRCLK_DIR | WM9081_BCLK_DIR;
		wm9081->master = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_B:
		aif2 |= WM9081_AIF_LRCLK_INV;
		/* fall through - DSP B is DSP A with LRCLK inverted */
	case SND_SOC_DAIFMT_DSP_A:
		aif2 |= 0x3;
		break;
	case SND_SOC_DAIFMT_I2S:
		aif2 |= 0x2;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		aif2 |= 0x1;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* frame inversion not valid for DSP modes */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif2 |= WM9081_AIF_BCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;

	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_RIGHT_J:
	case SND_SOC_DAIFMT_LEFT_J:
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			aif2 |= WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif2 |= WM9081_AIF_BCLK_INV;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			aif2 |= WM9081_AIF_LRCLK_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);

	return 0;
}
1017
1018static int wm9081_hw_params(struct snd_pcm_substream *substream,
1019 struct snd_pcm_hw_params *params,
1020 struct snd_soc_dai *dai)
1021{
1022 struct snd_soc_codec *codec = dai->codec;
1023 struct wm9081_priv *wm9081 = codec->private_data;
1024 int ret, i, best, best_val, cur_val;
1025 unsigned int clk_ctrl2, aif1, aif2, aif3, aif4;
1026
1027 clk_ctrl2 = wm9081_read(codec, WM9081_CLOCK_CONTROL_2);
1028 clk_ctrl2 &= ~(WM9081_CLK_SYS_RATE_MASK | WM9081_SAMPLE_RATE_MASK);
1029
1030 aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1);
1031
1032 aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2);
1033 aif2 &= ~WM9081_AIF_WL_MASK;
1034
1035 aif3 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_3);
1036 aif3 &= ~WM9081_BCLK_DIV_MASK;
1037
1038 aif4 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_4);
1039 aif4 &= ~WM9081_LRCLK_RATE_MASK;
1040
1041 /* What BCLK do we need? */
1042 wm9081->fs = params_rate(params);
1043 wm9081->bclk = 2 * wm9081->fs;
1044 switch (params_format(params)) {
1045 case SNDRV_PCM_FORMAT_S16_LE:
1046 wm9081->bclk *= 16;
1047 break;
1048 case SNDRV_PCM_FORMAT_S20_3LE:
1049 wm9081->bclk *= 20;
1050 aif2 |= 0x4;
1051 break;
1052 case SNDRV_PCM_FORMAT_S24_LE:
1053 wm9081->bclk *= 24;
1054 aif2 |= 0x8;
1055 break;
1056 case SNDRV_PCM_FORMAT_S32_LE:
1057 wm9081->bclk *= 32;
1058 aif2 |= 0xc;
1059 break;
1060 default:
1061 return -EINVAL;
1062 }
1063
1064 if (aif1 & WM9081_AIFDAC_TDM_MODE_MASK) {
1065 int slots = ((aif1 & WM9081_AIFDAC_TDM_MODE_MASK) >>
1066 WM9081_AIFDAC_TDM_MODE_SHIFT) + 1;
1067 wm9081->bclk *= slots;
1068 }
1069
1070 dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm9081->bclk);
1071
1072 ret = configure_clock(codec);
1073 if (ret != 0)
1074 return ret;
1075
1076 /* Select nearest CLK_SYS_RATE */
1077 best = 0;
1078 best_val = abs((wm9081->sysclk_rate / clk_sys_rates[0].ratio)
1079 - wm9081->fs);
1080 for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
1081 cur_val = abs((wm9081->sysclk_rate /
1082 clk_sys_rates[i].ratio) - wm9081->fs);;
1083 if (cur_val < best_val) {
1084 best = i;
1085 best_val = cur_val;
1086 }
1087 }
1088 dev_dbg(codec->dev, "Selected CLK_SYS_RATIO of %d\n",
1089 clk_sys_rates[best].ratio);
1090 clk_ctrl2 |= (clk_sys_rates[best].clk_sys_rate
1091 << WM9081_CLK_SYS_RATE_SHIFT);
1092
1093 /* SAMPLE_RATE */
1094 best = 0;
1095 best_val = abs(wm9081->fs - sample_rates[0].rate);
1096 for (i = 1; i < ARRAY_SIZE(sample_rates); i++) {
1097 /* Closest match */
1098 cur_val = abs(wm9081->fs - sample_rates[i].rate);
1099 if (cur_val < best_val) {
1100 best = i;
1101 best_val = cur_val;
1102 }
1103 }
1104 dev_dbg(codec->dev, "Selected SAMPLE_RATE of %dHz\n",
1105 sample_rates[best].rate);
1106 clk_ctrl2 |= (sample_rates[best].sample_rate
1107 << WM9081_SAMPLE_RATE_SHIFT);
1108
1109 /* BCLK_DIV */
1110 best = 0;
1111 best_val = INT_MAX;
1112 for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
1113 cur_val = ((wm9081->sysclk_rate * 10) / bclk_divs[i].div)
1114 - wm9081->bclk;
1115 if (cur_val < 0) /* Table is sorted */
1116 break;
1117 if (cur_val < best_val) {
1118 best = i;
1119 best_val = cur_val;
1120 }
1121 }
1122 wm9081->bclk = (wm9081->sysclk_rate * 10) / bclk_divs[best].div;
1123 dev_dbg(codec->dev, "Selected BCLK_DIV of %d for %dHz BCLK\n",
1124 bclk_divs[best].div, wm9081->bclk);
1125 aif3 |= bclk_divs[best].bclk_div;
1126
1127 /* LRCLK is a simple fraction of BCLK */
1128 dev_dbg(codec->dev, "LRCLK_RATE is %d\n", wm9081->bclk / wm9081->fs);
1129 aif4 |= wm9081->bclk / wm9081->fs;
1130
1131 /* Apply a ReTune Mobile configuration if it's in use */
1132 if (wm9081->retune) {
1133 struct wm9081_retune_mobile_config *retune = wm9081->retune;
1134 struct wm9081_retune_mobile_setting *s;
1135 int eq1;
1136
1137 best = 0;
1138 best_val = abs(retune->configs[0].rate - wm9081->fs);
1139 for (i = 0; i < retune->num_configs; i++) {
1140 cur_val = abs(retune->configs[i].rate - wm9081->fs);
1141 if (cur_val < best_val) {
1142 best_val = cur_val;
1143 best = i;
1144 }
1145 }
1146 s = &retune->configs[best];
1147
1148 dev_dbg(codec->dev, "ReTune Mobile %s tuned for %dHz\n",
1149 s->name, s->rate);
1150
1151 /* If the EQ is enabled then disable it while we write out */
1152 eq1 = wm9081_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA;
1153 if (eq1 & WM9081_EQ_ENA)
1154 wm9081_write(codec, WM9081_EQ_1, 0);
1155
1156 /* Write out the other values */
1157 for (i = 1; i < ARRAY_SIZE(s->config); i++)
1158 wm9081_write(codec, WM9081_EQ_1 + i, s->config[i]);
1159
1160 eq1 |= (s->config[0] & ~WM9081_EQ_ENA);
1161 wm9081_write(codec, WM9081_EQ_1, eq1);
1162 }
1163
1164 wm9081_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2);
1165 wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
1166 wm9081_write(codec, WM9081_AUDIO_INTERFACE_3, aif3);
1167 wm9081_write(codec, WM9081_AUDIO_INTERFACE_4, aif4);
1168
1169 return 0;
1170}
1171
1172static int wm9081_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1173{
1174 struct snd_soc_codec *codec = codec_dai->codec;
1175 unsigned int reg;
1176
1177 reg = wm9081_read(codec, WM9081_DAC_DIGITAL_2);
1178
1179 if (mute)
1180 reg |= WM9081_DAC_MUTE;
1181 else
1182 reg &= ~WM9081_DAC_MUTE;
1183
1184 wm9081_write(codec, WM9081_DAC_DIGITAL_2, reg);
1185
1186 return 0;
1187}
1188
1189static int wm9081_set_sysclk(struct snd_soc_dai *codec_dai,
1190 int clk_id, unsigned int freq, int dir)
1191{
1192 struct snd_soc_codec *codec = codec_dai->codec;
1193 struct wm9081_priv *wm9081 = codec->private_data;
1194
1195 switch (clk_id) {
1196 case WM9081_SYSCLK_MCLK:
1197 case WM9081_SYSCLK_FLL_MCLK:
1198 wm9081->sysclk_source = clk_id;
1199 wm9081->mclk_rate = freq;
1200 break;
1201
1202 default:
1203 return -EINVAL;
1204 }
1205
1206 return 0;
1207}
1208
1209static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
1210 unsigned int mask, int slots)
1211{
1212 struct snd_soc_codec *codec = dai->codec;
1213 unsigned int aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1);
1214
1215 aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK);
1216
1217 if (slots < 1 || slots > 4)
1218 return -EINVAL;
1219
1220 aif1 |= (slots - 1) << WM9081_AIFDAC_TDM_MODE_SHIFT;
1221
1222 switch (mask) {
1223 case 1:
1224 break;
1225 case 2:
1226 aif1 |= 0x10;
1227 break;
1228 case 4:
1229 aif1 |= 0x20;
1230 break;
1231 case 8:
1232 aif1 |= 0x30;
1233 break;
1234 default:
1235 return -EINVAL;
1236 }
1237
1238 wm9081_write(codec, WM9081_AUDIO_INTERFACE_1, aif1);
1239
1240 return 0;
1241}
1242
/* Sample rates from 8kHz to 96kHz */
#define WM9081_RATES SNDRV_PCM_RATE_8000_96000

/* 16, 20, 24 and 32 bit little-endian sample formats */
#define WM9081_FORMATS \
	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
	 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
1248
/* DAI callbacks for the playback interface. */
static struct snd_soc_dai_ops wm9081_dai_ops = {
	.hw_params = wm9081_hw_params,
	.set_sysclk = wm9081_set_sysclk,
	.set_fmt = wm9081_set_dai_fmt,
	.digital_mute = wm9081_digital_mute,
	.set_tdm_slot = wm9081_set_tdm_slot,
};
1256
/* We report two channels because the CODEC processes a stereo signal, even
 * though it is only capable of handling a mono output.
 */
struct snd_soc_dai wm9081_dai = {
	.name = "WM9081",
	.playback = {
		.stream_name = "HiFi Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM9081_RATES,
		.formats = WM9081_FORMATS,
	},
	.ops = &wm9081_dai_ops,
};
EXPORT_SYMBOL_GPL(wm9081_dai);
1272
1273
1274static struct snd_soc_codec *wm9081_codec;
1275
/*
 * SoC platform probe: bind the previously registered codec to the
 * card, create PCMs and register controls, DAPM widgets and routes.
 * When a ReTune Mobile configuration is supplied the plain EQ
 * controls are omitted, since ReTune Mobile manages the EQ registers.
 */
static int wm9081_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec;
	struct wm9081_priv *wm9081;
	int ret = 0;

	/* The codec must have been registered (e.g. via I2C) first */
	if (wm9081_codec == NULL) {
		dev_err(&pdev->dev, "Codec device not registered\n");
		return -ENODEV;
	}

	socdev->card->codec = wm9081_codec;
	codec = wm9081_codec;
	wm9081 = codec->private_data;

	/* register pcms */
	ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
	if (ret < 0) {
		dev_err(codec->dev, "failed to create pcms: %d\n", ret);
		goto pcm_err;
	}

	snd_soc_add_controls(codec, wm9081_snd_controls,
			     ARRAY_SIZE(wm9081_snd_controls));
	if (!wm9081->retune) {
		dev_dbg(codec->dev,
			"No ReTune Mobile data, using normal EQ\n");
		snd_soc_add_controls(codec, wm9081_eq_controls,
				     ARRAY_SIZE(wm9081_eq_controls));
	}

	snd_soc_dapm_new_controls(codec, wm9081_dapm_widgets,
				  ARRAY_SIZE(wm9081_dapm_widgets));
	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
	snd_soc_dapm_new_widgets(codec);

	ret = snd_soc_init_card(socdev);
	if (ret < 0) {
		dev_err(codec->dev, "failed to register card: %d\n", ret);
		goto card_err;
	}

	return ret;

card_err:
	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
pcm_err:
	return ret;
}
1327
/* Platform driver remove: release the PCMs and DAPM resources. */
static int wm9081_remove(struct platform_device *pdev)
{
	struct snd_soc_device *soc_dev = platform_get_drvdata(pdev);

	snd_soc_free_pcms(soc_dev);
	snd_soc_dapm_free(soc_dev);

	return 0;
}
1337
1338#ifdef CONFIG_PM
1339static int wm9081_suspend(struct platform_device *pdev, pm_message_t state)
1340{
1341 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1342 struct snd_soc_codec *codec = socdev->card->codec;
1343
1344 wm9081_set_bias_level(codec, SND_SOC_BIAS_OFF);
1345
1346 return 0;
1347}
1348
1349static int wm9081_resume(struct platform_device *pdev)
1350{
1351 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1352 struct snd_soc_codec *codec = socdev->card->codec;
1353 u16 *reg_cache = codec->reg_cache;
1354 int i;
1355
1356 for (i = 0; i < codec->reg_cache_size; i++) {
1357 if (i == WM9081_SOFTWARE_RESET)
1358 continue;
1359
1360 wm9081_write(codec, i, reg_cache[i]);
1361 }
1362
1363 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1364
1365 return 0;
1366}
1367#else
1368#define wm9081_suspend NULL
1369#define wm9081_resume NULL
1370#endif
1371
/*
 * ASoC device-level operations for the WM9081 codec.  suspend/resume
 * are compiled out (defined NULL) when CONFIG_PM is not set.
 */
struct snd_soc_codec_device soc_codec_dev_wm9081 = {
	.probe = 	wm9081_probe,
	.remove = 	wm9081_remove,
	.suspend =	wm9081_suspend,
	.resume =	wm9081_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm9081);
1379
1380static int wm9081_register(struct wm9081_priv *wm9081)
1381{
1382 struct snd_soc_codec *codec = &wm9081->codec;
1383 int ret;
1384 u16 reg;
1385
1386 if (wm9081_codec) {
1387 dev_err(codec->dev, "Another WM9081 is registered\n");
1388 ret = -EINVAL;
1389 goto err;
1390 }
1391
1392 mutex_init(&codec->mutex);
1393 INIT_LIST_HEAD(&codec->dapm_widgets);
1394 INIT_LIST_HEAD(&codec->dapm_paths);
1395
1396 codec->private_data = wm9081;
1397 codec->name = "WM9081";
1398 codec->owner = THIS_MODULE;
1399 codec->read = wm9081_read;
1400 codec->write = wm9081_write;
1401 codec->dai = &wm9081_dai;
1402 codec->num_dai = 1;
1403 codec->reg_cache_size = ARRAY_SIZE(wm9081->reg_cache);
1404 codec->reg_cache = &wm9081->reg_cache;
1405 codec->bias_level = SND_SOC_BIAS_OFF;
1406 codec->set_bias_level = wm9081_set_bias_level;
1407
1408 memcpy(codec->reg_cache, wm9081_reg_defaults,
1409 sizeof(wm9081_reg_defaults));
1410
1411 reg = wm9081_read_hw(codec, WM9081_SOFTWARE_RESET);
1412 if (reg != 0x9081) {
1413 dev_err(codec->dev, "Device is not a WM9081: ID=0x%x\n", reg);
1414 ret = -EINVAL;
1415 goto err;
1416 }
1417
1418 ret = wm9081_reset(codec);
1419 if (ret < 0) {
1420 dev_err(codec->dev, "Failed to issue reset\n");
1421 return ret;
1422 }
1423
1424 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1425
1426 /* Enable zero cross by default */
1427 reg = wm9081_read(codec, WM9081_ANALOGUE_LINEOUT);
1428 wm9081_write(codec, WM9081_ANALOGUE_LINEOUT, reg | WM9081_LINEOUTZC);
1429 reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_PGA);
1430 wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_PGA,
1431 reg | WM9081_SPKPGAZC);
1432
1433 wm9081_dai.dev = codec->dev;
1434
1435 wm9081_codec = codec;
1436
1437 ret = snd_soc_register_codec(codec);
1438 if (ret != 0) {
1439 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
1440 return ret;
1441 }
1442
1443 ret = snd_soc_register_dai(&wm9081_dai);
1444 if (ret != 0) {
1445 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
1446 snd_soc_unregister_codec(codec);
1447 return ret;
1448 }
1449
1450 return 0;
1451
1452err:
1453 kfree(wm9081);
1454 return ret;
1455}
1456
1457static void wm9081_unregister(struct wm9081_priv *wm9081)
1458{
1459 wm9081_set_bias_level(&wm9081->codec, SND_SOC_BIAS_OFF);
1460 snd_soc_unregister_dai(&wm9081_dai);
1461 snd_soc_unregister_codec(&wm9081->codec);
1462 kfree(wm9081);
1463 wm9081_codec = NULL;
1464}
1465
1466static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
1467 const struct i2c_device_id *id)
1468{
1469 struct wm9081_priv *wm9081;
1470 struct snd_soc_codec *codec;
1471
1472 wm9081 = kzalloc(sizeof(struct wm9081_priv), GFP_KERNEL);
1473 if (wm9081 == NULL)
1474 return -ENOMEM;
1475
1476 codec = &wm9081->codec;
1477 codec->hw_write = (hw_write_t)i2c_master_send;
1478 wm9081->retune = i2c->dev.platform_data;
1479
1480 i2c_set_clientdata(i2c, wm9081);
1481 codec->control_data = i2c;
1482
1483 codec->dev = &i2c->dev;
1484
1485 return wm9081_register(wm9081);
1486}
1487
1488static __devexit int wm9081_i2c_remove(struct i2c_client *client)
1489{
1490 struct wm9081_priv *wm9081 = i2c_get_clientdata(client);
1491 wm9081_unregister(wm9081);
1492 return 0;
1493}
1494
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id wm9081_i2c_id[] = {
	{ "wm9081", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm9081_i2c_id);
1500
/* I2C driver binding for the WM9081. */
static struct i2c_driver wm9081_i2c_driver = {
	.driver = {
		.name = "wm9081",
		.owner = THIS_MODULE,
	},
	.probe =    wm9081_i2c_probe,
	.remove =   __devexit_p(wm9081_i2c_remove),
	.id_table = wm9081_i2c_id,
};
1510
1511static int __init wm9081_modinit(void)
1512{
1513 int ret;
1514
1515 ret = i2c_add_driver(&wm9081_i2c_driver);
1516 if (ret != 0) {
1517 printk(KERN_ERR "Failed to register WM9081 I2C driver: %d\n",
1518 ret);
1519 }
1520
1521 return ret;
1522}
1523module_init(wm9081_modinit);
1524
/* Module exit point: unregister the I2C driver. */
static void __exit wm9081_exit(void)
{
	i2c_del_driver(&wm9081_i2c_driver);
}
module_exit(wm9081_exit);
1530
1531
1532MODULE_DESCRIPTION("ASoC WM9081 driver");
1533MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
1534MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm9081.h b/sound/soc/codecs/wm9081.h
new file mode 100644
index 000000000000..42d3bc757021
--- /dev/null
+++ b/sound/soc/codecs/wm9081.h
@@ -0,0 +1,787 @@
1#ifndef WM9081_H
2#define WM9081_H
3
4/*
5 * wm9081.c -- WM9081 ALSA SoC Audio driver
6 *
7 * Author: Mark Brown
8 *
9 * Copyright 2009 Wolfson Microelectronics plc
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <sound/soc.h>
17
18extern struct snd_soc_dai wm9081_dai;
19extern struct snd_soc_codec_device soc_codec_dev_wm9081;
20
21/*
22 * SYSCLK sources
23 */
24#define WM9081_SYSCLK_MCLK 1 /* Use MCLK without FLL */
25#define WM9081_SYSCLK_FLL_MCLK 2 /* Use MCLK, enabling FLL if required */
26
27/*
28 * Register values.
29 */
30#define WM9081_SOFTWARE_RESET 0x00
31#define WM9081_ANALOGUE_LINEOUT 0x02
32#define WM9081_ANALOGUE_SPEAKER_PGA 0x03
33#define WM9081_VMID_CONTROL 0x04
34#define WM9081_BIAS_CONTROL_1 0x05
35#define WM9081_ANALOGUE_MIXER 0x07
36#define WM9081_ANTI_POP_CONTROL 0x08
37#define WM9081_ANALOGUE_SPEAKER_1 0x09
38#define WM9081_ANALOGUE_SPEAKER_2 0x0A
39#define WM9081_POWER_MANAGEMENT 0x0B
40#define WM9081_CLOCK_CONTROL_1 0x0C
41#define WM9081_CLOCK_CONTROL_2 0x0D
42#define WM9081_CLOCK_CONTROL_3 0x0E
43#define WM9081_FLL_CONTROL_1 0x10
44#define WM9081_FLL_CONTROL_2 0x11
45#define WM9081_FLL_CONTROL_3 0x12
46#define WM9081_FLL_CONTROL_4 0x13
47#define WM9081_FLL_CONTROL_5 0x14
48#define WM9081_AUDIO_INTERFACE_1 0x16
49#define WM9081_AUDIO_INTERFACE_2 0x17
50#define WM9081_AUDIO_INTERFACE_3 0x18
51#define WM9081_AUDIO_INTERFACE_4 0x19
52#define WM9081_INTERRUPT_STATUS 0x1A
53#define WM9081_INTERRUPT_STATUS_MASK 0x1B
54#define WM9081_INTERRUPT_POLARITY 0x1C
55#define WM9081_INTERRUPT_CONTROL 0x1D
56#define WM9081_DAC_DIGITAL_1 0x1E
57#define WM9081_DAC_DIGITAL_2 0x1F
58#define WM9081_DRC_1 0x20
59#define WM9081_DRC_2 0x21
60#define WM9081_DRC_3 0x22
61#define WM9081_DRC_4 0x23
62#define WM9081_WRITE_SEQUENCER_1 0x26
63#define WM9081_WRITE_SEQUENCER_2 0x27
64#define WM9081_MW_SLAVE_1 0x28
65#define WM9081_EQ_1 0x2A
66#define WM9081_EQ_2 0x2B
67#define WM9081_EQ_3 0x2C
68#define WM9081_EQ_4 0x2D
69#define WM9081_EQ_5 0x2E
70#define WM9081_EQ_6 0x2F
71#define WM9081_EQ_7 0x30
72#define WM9081_EQ_8 0x31
73#define WM9081_EQ_9 0x32
74#define WM9081_EQ_10 0x33
75#define WM9081_EQ_11 0x34
76#define WM9081_EQ_12 0x35
77#define WM9081_EQ_13 0x36
78#define WM9081_EQ_14 0x37
79#define WM9081_EQ_15 0x38
80#define WM9081_EQ_16 0x39
81#define WM9081_EQ_17 0x3A
82#define WM9081_EQ_18 0x3B
83#define WM9081_EQ_19 0x3C
84#define WM9081_EQ_20 0x3D
85
86#define WM9081_REGISTER_COUNT 55
87#define WM9081_MAX_REGISTER 0x3D
88
89/*
90 * Field Definitions.
91 */
92
93/*
94 * R0 (0x00) - Software Reset
95 */
96#define WM9081_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
97#define WM9081_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
98#define WM9081_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
99
100/*
101 * R2 (0x02) - Analogue Lineout
102 */
103#define WM9081_LINEOUT_MUTE 0x0080 /* LINEOUT_MUTE */
104#define WM9081_LINEOUT_MUTE_MASK 0x0080 /* LINEOUT_MUTE */
105#define WM9081_LINEOUT_MUTE_SHIFT 7 /* LINEOUT_MUTE */
106#define WM9081_LINEOUT_MUTE_WIDTH 1 /* LINEOUT_MUTE */
107#define WM9081_LINEOUTZC 0x0040 /* LINEOUTZC */
108#define WM9081_LINEOUTZC_MASK 0x0040 /* LINEOUTZC */
109#define WM9081_LINEOUTZC_SHIFT 6 /* LINEOUTZC */
110#define WM9081_LINEOUTZC_WIDTH 1 /* LINEOUTZC */
111#define WM9081_LINEOUT_VOL_MASK 0x003F /* LINEOUT_VOL - [5:0] */
112#define WM9081_LINEOUT_VOL_SHIFT 0 /* LINEOUT_VOL - [5:0] */
113#define WM9081_LINEOUT_VOL_WIDTH 6 /* LINEOUT_VOL - [5:0] */
114
115/*
116 * R3 (0x03) - Analogue Speaker PGA
117 */
118#define WM9081_SPKPGA_MUTE 0x0080 /* SPKPGA_MUTE */
119#define WM9081_SPKPGA_MUTE_MASK 0x0080 /* SPKPGA_MUTE */
120#define WM9081_SPKPGA_MUTE_SHIFT 7 /* SPKPGA_MUTE */
121#define WM9081_SPKPGA_MUTE_WIDTH 1 /* SPKPGA_MUTE */
122#define WM9081_SPKPGAZC 0x0040 /* SPKPGAZC */
123#define WM9081_SPKPGAZC_MASK 0x0040 /* SPKPGAZC */
124#define WM9081_SPKPGAZC_SHIFT 6 /* SPKPGAZC */
125#define WM9081_SPKPGAZC_WIDTH 1 /* SPKPGAZC */
126#define WM9081_SPKPGA_VOL_MASK 0x003F /* SPKPGA_VOL - [5:0] */
127#define WM9081_SPKPGA_VOL_SHIFT 0 /* SPKPGA_VOL - [5:0] */
128#define WM9081_SPKPGA_VOL_WIDTH 6 /* SPKPGA_VOL - [5:0] */
129
130/*
131 * R4 (0x04) - VMID Control
132 */
133#define WM9081_VMID_BUF_ENA 0x0020 /* VMID_BUF_ENA */
134#define WM9081_VMID_BUF_ENA_MASK 0x0020 /* VMID_BUF_ENA */
135#define WM9081_VMID_BUF_ENA_SHIFT 5 /* VMID_BUF_ENA */
136#define WM9081_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
137#define WM9081_VMID_RAMP 0x0008 /* VMID_RAMP */
138#define WM9081_VMID_RAMP_MASK 0x0008 /* VMID_RAMP */
139#define WM9081_VMID_RAMP_SHIFT 3 /* VMID_RAMP */
140#define WM9081_VMID_RAMP_WIDTH 1 /* VMID_RAMP */
141#define WM9081_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */
142#define WM9081_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */
143#define WM9081_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */
144#define WM9081_VMID_FAST_ST 0x0001 /* VMID_FAST_ST */
145#define WM9081_VMID_FAST_ST_MASK 0x0001 /* VMID_FAST_ST */
146#define WM9081_VMID_FAST_ST_SHIFT 0 /* VMID_FAST_ST */
147#define WM9081_VMID_FAST_ST_WIDTH 1 /* VMID_FAST_ST */
148
149/*
150 * R5 (0x05) - Bias Control 1
151 */
152#define WM9081_BIAS_SRC 0x0040 /* BIAS_SRC */
153#define WM9081_BIAS_SRC_MASK 0x0040 /* BIAS_SRC */
154#define WM9081_BIAS_SRC_SHIFT 6 /* BIAS_SRC */
155#define WM9081_BIAS_SRC_WIDTH 1 /* BIAS_SRC */
156#define WM9081_STBY_BIAS_LVL 0x0020 /* STBY_BIAS_LVL */
157#define WM9081_STBY_BIAS_LVL_MASK 0x0020 /* STBY_BIAS_LVL */
158#define WM9081_STBY_BIAS_LVL_SHIFT 5 /* STBY_BIAS_LVL */
159#define WM9081_STBY_BIAS_LVL_WIDTH 1 /* STBY_BIAS_LVL */
160#define WM9081_STBY_BIAS_ENA 0x0010 /* STBY_BIAS_ENA */
161#define WM9081_STBY_BIAS_ENA_MASK 0x0010 /* STBY_BIAS_ENA */
162#define WM9081_STBY_BIAS_ENA_SHIFT 4 /* STBY_BIAS_ENA */
163#define WM9081_STBY_BIAS_ENA_WIDTH 1 /* STBY_BIAS_ENA */
164#define WM9081_BIAS_LVL_MASK 0x000C /* BIAS_LVL - [3:2] */
165#define WM9081_BIAS_LVL_SHIFT 2 /* BIAS_LVL - [3:2] */
166#define WM9081_BIAS_LVL_WIDTH 2 /* BIAS_LVL - [3:2] */
167#define WM9081_BIAS_ENA 0x0002 /* BIAS_ENA */
168#define WM9081_BIAS_ENA_MASK 0x0002 /* BIAS_ENA */
169#define WM9081_BIAS_ENA_SHIFT 1 /* BIAS_ENA */
170#define WM9081_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
171#define WM9081_STARTUP_BIAS_ENA 0x0001 /* STARTUP_BIAS_ENA */
172#define WM9081_STARTUP_BIAS_ENA_MASK 0x0001 /* STARTUP_BIAS_ENA */
173#define WM9081_STARTUP_BIAS_ENA_SHIFT 0 /* STARTUP_BIAS_ENA */
174#define WM9081_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */
175
176/*
177 * R7 (0x07) - Analogue Mixer
178 */
179#define WM9081_DAC_SEL 0x0010 /* DAC_SEL */
180#define WM9081_DAC_SEL_MASK 0x0010 /* DAC_SEL */
181#define WM9081_DAC_SEL_SHIFT 4 /* DAC_SEL */
182#define WM9081_DAC_SEL_WIDTH 1 /* DAC_SEL */
183#define WM9081_IN2_VOL 0x0008 /* IN2_VOL */
184#define WM9081_IN2_VOL_MASK 0x0008 /* IN2_VOL */
185#define WM9081_IN2_VOL_SHIFT 3 /* IN2_VOL */
186#define WM9081_IN2_VOL_WIDTH 1 /* IN2_VOL */
187#define WM9081_IN2_ENA 0x0004 /* IN2_ENA */
188#define WM9081_IN2_ENA_MASK 0x0004 /* IN2_ENA */
189#define WM9081_IN2_ENA_SHIFT 2 /* IN2_ENA */
190#define WM9081_IN2_ENA_WIDTH 1 /* IN2_ENA */
191#define WM9081_IN1_VOL 0x0002 /* IN1_VOL */
192#define WM9081_IN1_VOL_MASK 0x0002 /* IN1_VOL */
193#define WM9081_IN1_VOL_SHIFT 1 /* IN1_VOL */
194#define WM9081_IN1_VOL_WIDTH 1 /* IN1_VOL */
195#define WM9081_IN1_ENA 0x0001 /* IN1_ENA */
196#define WM9081_IN1_ENA_MASK 0x0001 /* IN1_ENA */
197#define WM9081_IN1_ENA_SHIFT 0 /* IN1_ENA */
198#define WM9081_IN1_ENA_WIDTH 1 /* IN1_ENA */
199
200/*
201 * R8 (0x08) - Anti Pop Control
202 */
203#define WM9081_LINEOUT_DISCH 0x0004 /* LINEOUT_DISCH */
204#define WM9081_LINEOUT_DISCH_MASK 0x0004 /* LINEOUT_DISCH */
205#define WM9081_LINEOUT_DISCH_SHIFT 2 /* LINEOUT_DISCH */
206#define WM9081_LINEOUT_DISCH_WIDTH 1 /* LINEOUT_DISCH */
207#define WM9081_LINEOUT_VROI 0x0002 /* LINEOUT_VROI */
208#define WM9081_LINEOUT_VROI_MASK 0x0002 /* LINEOUT_VROI */
209#define WM9081_LINEOUT_VROI_SHIFT 1 /* LINEOUT_VROI */
210#define WM9081_LINEOUT_VROI_WIDTH 1 /* LINEOUT_VROI */
211#define WM9081_LINEOUT_CLAMP 0x0001 /* LINEOUT_CLAMP */
212#define WM9081_LINEOUT_CLAMP_MASK 0x0001 /* LINEOUT_CLAMP */
213#define WM9081_LINEOUT_CLAMP_SHIFT 0 /* LINEOUT_CLAMP */
214#define WM9081_LINEOUT_CLAMP_WIDTH 1 /* LINEOUT_CLAMP */
215
216/*
217 * R9 (0x09) - Analogue Speaker 1
218 */
219#define WM9081_SPK_DCGAIN_MASK 0x0038 /* SPK_DCGAIN - [5:3] */
220#define WM9081_SPK_DCGAIN_SHIFT 3 /* SPK_DCGAIN - [5:3] */
221#define WM9081_SPK_DCGAIN_WIDTH 3 /* SPK_DCGAIN - [5:3] */
222#define WM9081_SPK_ACGAIN_MASK 0x0007 /* SPK_ACGAIN - [2:0] */
223#define WM9081_SPK_ACGAIN_SHIFT 0 /* SPK_ACGAIN - [2:0] */
224#define WM9081_SPK_ACGAIN_WIDTH 3 /* SPK_ACGAIN - [2:0] */
225
226/*
227 * R10 (0x0A) - Analogue Speaker 2
228 */
229#define WM9081_SPK_MODE 0x0040 /* SPK_MODE */
230#define WM9081_SPK_MODE_MASK 0x0040 /* SPK_MODE */
231#define WM9081_SPK_MODE_SHIFT 6 /* SPK_MODE */
232#define WM9081_SPK_MODE_WIDTH 1 /* SPK_MODE */
233#define WM9081_SPK_INV_MUTE 0x0010 /* SPK_INV_MUTE */
234#define WM9081_SPK_INV_MUTE_MASK 0x0010 /* SPK_INV_MUTE */
235#define WM9081_SPK_INV_MUTE_SHIFT 4 /* SPK_INV_MUTE */
236#define WM9081_SPK_INV_MUTE_WIDTH 1 /* SPK_INV_MUTE */
237#define WM9081_OUT_SPK_CTRL 0x0008 /* OUT_SPK_CTRL */
238#define WM9081_OUT_SPK_CTRL_MASK 0x0008 /* OUT_SPK_CTRL */
239#define WM9081_OUT_SPK_CTRL_SHIFT 3 /* OUT_SPK_CTRL */
240#define WM9081_OUT_SPK_CTRL_WIDTH 1 /* OUT_SPK_CTRL */
241
242/*
243 * R11 (0x0B) - Power Management
244 */
245#define WM9081_TSHUT_ENA 0x0100 /* TSHUT_ENA */
246#define WM9081_TSHUT_ENA_MASK 0x0100 /* TSHUT_ENA */
247#define WM9081_TSHUT_ENA_SHIFT 8 /* TSHUT_ENA */
248#define WM9081_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
249#define WM9081_TSENSE_ENA 0x0080 /* TSENSE_ENA */
250#define WM9081_TSENSE_ENA_MASK 0x0080 /* TSENSE_ENA */
251#define WM9081_TSENSE_ENA_SHIFT 7 /* TSENSE_ENA */
252#define WM9081_TSENSE_ENA_WIDTH 1 /* TSENSE_ENA */
253#define WM9081_TEMP_SHUT 0x0040 /* TEMP_SHUT */
254#define WM9081_TEMP_SHUT_MASK 0x0040 /* TEMP_SHUT */
255#define WM9081_TEMP_SHUT_SHIFT 6 /* TEMP_SHUT */
256#define WM9081_TEMP_SHUT_WIDTH 1 /* TEMP_SHUT */
257#define WM9081_LINEOUT_ENA 0x0010 /* LINEOUT_ENA */
258#define WM9081_LINEOUT_ENA_MASK 0x0010 /* LINEOUT_ENA */
259#define WM9081_LINEOUT_ENA_SHIFT 4 /* LINEOUT_ENA */
260#define WM9081_LINEOUT_ENA_WIDTH 1 /* LINEOUT_ENA */
261#define WM9081_SPKPGA_ENA 0x0004 /* SPKPGA_ENA */
262#define WM9081_SPKPGA_ENA_MASK 0x0004 /* SPKPGA_ENA */
263#define WM9081_SPKPGA_ENA_SHIFT 2 /* SPKPGA_ENA */
264#define WM9081_SPKPGA_ENA_WIDTH 1 /* SPKPGA_ENA */
265#define WM9081_SPK_ENA 0x0002 /* SPK_ENA */
266#define WM9081_SPK_ENA_MASK 0x0002 /* SPK_ENA */
267#define WM9081_SPK_ENA_SHIFT 1 /* SPK_ENA */
268#define WM9081_SPK_ENA_WIDTH 1 /* SPK_ENA */
269#define WM9081_DAC_ENA 0x0001 /* DAC_ENA */
270#define WM9081_DAC_ENA_MASK 0x0001 /* DAC_ENA */
271#define WM9081_DAC_ENA_SHIFT 0 /* DAC_ENA */
272#define WM9081_DAC_ENA_WIDTH 1 /* DAC_ENA */
273
274/*
275 * R12 (0x0C) - Clock Control 1
276 */
277#define WM9081_CLK_OP_DIV_MASK 0x1C00 /* CLK_OP_DIV - [12:10] */
278#define WM9081_CLK_OP_DIV_SHIFT 10 /* CLK_OP_DIV - [12:10] */
279#define WM9081_CLK_OP_DIV_WIDTH 3 /* CLK_OP_DIV - [12:10] */
280#define WM9081_CLK_TO_DIV_MASK 0x0300 /* CLK_TO_DIV - [9:8] */
281#define WM9081_CLK_TO_DIV_SHIFT 8 /* CLK_TO_DIV - [9:8] */
282#define WM9081_CLK_TO_DIV_WIDTH 2 /* CLK_TO_DIV - [9:8] */
283#define WM9081_MCLKDIV2 0x0080 /* MCLKDIV2 */
284#define WM9081_MCLKDIV2_MASK 0x0080 /* MCLKDIV2 */
285#define WM9081_MCLKDIV2_SHIFT 7 /* MCLKDIV2 */
286#define WM9081_MCLKDIV2_WIDTH 1 /* MCLKDIV2 */
287
288/*
289 * R13 (0x0D) - Clock Control 2
290 */
291#define WM9081_CLK_SYS_RATE_MASK 0x00F0 /* CLK_SYS_RATE - [7:4] */
292#define WM9081_CLK_SYS_RATE_SHIFT 4 /* CLK_SYS_RATE - [7:4] */
293#define WM9081_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [7:4] */
294#define WM9081_SAMPLE_RATE_MASK 0x000F /* SAMPLE_RATE - [3:0] */
295#define WM9081_SAMPLE_RATE_SHIFT 0 /* SAMPLE_RATE - [3:0] */
296#define WM9081_SAMPLE_RATE_WIDTH 4 /* SAMPLE_RATE - [3:0] */
297
298/*
299 * R14 (0x0E) - Clock Control 3
300 */
301#define WM9081_CLK_SRC_SEL 0x2000 /* CLK_SRC_SEL */
302#define WM9081_CLK_SRC_SEL_MASK 0x2000 /* CLK_SRC_SEL */
303#define WM9081_CLK_SRC_SEL_SHIFT 13 /* CLK_SRC_SEL */
304#define WM9081_CLK_SRC_SEL_WIDTH 1 /* CLK_SRC_SEL */
305#define WM9081_CLK_OP_ENA 0x0020 /* CLK_OP_ENA */
306#define WM9081_CLK_OP_ENA_MASK 0x0020 /* CLK_OP_ENA */
307#define WM9081_CLK_OP_ENA_SHIFT 5 /* CLK_OP_ENA */
308#define WM9081_CLK_OP_ENA_WIDTH 1 /* CLK_OP_ENA */
309#define WM9081_CLK_TO_ENA 0x0004 /* CLK_TO_ENA */
310#define WM9081_CLK_TO_ENA_MASK 0x0004 /* CLK_TO_ENA */
311#define WM9081_CLK_TO_ENA_SHIFT 2 /* CLK_TO_ENA */
312#define WM9081_CLK_TO_ENA_WIDTH 1 /* CLK_TO_ENA */
313#define WM9081_CLK_DSP_ENA 0x0002 /* CLK_DSP_ENA */
314#define WM9081_CLK_DSP_ENA_MASK 0x0002 /* CLK_DSP_ENA */
315#define WM9081_CLK_DSP_ENA_SHIFT 1 /* CLK_DSP_ENA */
316#define WM9081_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
317#define WM9081_CLK_SYS_ENA 0x0001 /* CLK_SYS_ENA */
318#define WM9081_CLK_SYS_ENA_MASK 0x0001 /* CLK_SYS_ENA */
319#define WM9081_CLK_SYS_ENA_SHIFT 0 /* CLK_SYS_ENA */
320#define WM9081_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
321
322/*
323 * R16 (0x10) - FLL Control 1
324 */
325#define WM9081_FLL_HOLD 0x0008 /* FLL_HOLD */
326#define WM9081_FLL_HOLD_MASK 0x0008 /* FLL_HOLD */
327#define WM9081_FLL_HOLD_SHIFT 3 /* FLL_HOLD */
328#define WM9081_FLL_HOLD_WIDTH 1 /* FLL_HOLD */
329#define WM9081_FLL_FRAC 0x0004 /* FLL_FRAC */
330#define WM9081_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */
331#define WM9081_FLL_FRAC_SHIFT 2 /* FLL_FRAC */
332#define WM9081_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
333#define WM9081_FLL_ENA 0x0001 /* FLL_ENA */
334#define WM9081_FLL_ENA_MASK 0x0001 /* FLL_ENA */
335#define WM9081_FLL_ENA_SHIFT 0 /* FLL_ENA */
336#define WM9081_FLL_ENA_WIDTH 1 /* FLL_ENA */
337
338/*
339 * R17 (0x11) - FLL Control 2
340 */
341#define WM9081_FLL_OUTDIV_MASK 0x0700 /* FLL_OUTDIV - [10:8] */
342#define WM9081_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [10:8] */
343#define WM9081_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [10:8] */
344#define WM9081_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
345#define WM9081_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
346#define WM9081_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
347#define WM9081_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
348#define WM9081_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
349#define WM9081_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
350
351/*
352 * R18 (0x12) - FLL Control 3
353 */
354#define WM9081_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
355#define WM9081_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
356#define WM9081_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
357
358/*
359 * R19 (0x13) - FLL Control 4
360 */
361#define WM9081_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
362#define WM9081_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
363#define WM9081_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
364#define WM9081_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
365#define WM9081_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
366#define WM9081_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
367
368/*
369 * R20 (0x14) - FLL Control 5
370 */
371#define WM9081_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
372#define WM9081_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
373#define WM9081_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
374#define WM9081_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */
375#define WM9081_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */
376#define WM9081_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
377
378/*
379 * R22 (0x16) - Audio Interface 1
380 */
381#define WM9081_AIFDAC_CHAN 0x0040 /* AIFDAC_CHAN */
382#define WM9081_AIFDAC_CHAN_MASK 0x0040 /* AIFDAC_CHAN */
383#define WM9081_AIFDAC_CHAN_SHIFT 6 /* AIFDAC_CHAN */
384#define WM9081_AIFDAC_CHAN_WIDTH 1 /* AIFDAC_CHAN */
385#define WM9081_AIFDAC_TDM_SLOT_MASK 0x0030 /* AIFDAC_TDM_SLOT - [5:4] */
386#define WM9081_AIFDAC_TDM_SLOT_SHIFT 4 /* AIFDAC_TDM_SLOT - [5:4] */
387#define WM9081_AIFDAC_TDM_SLOT_WIDTH 2 /* AIFDAC_TDM_SLOT - [5:4] */
388#define WM9081_AIFDAC_TDM_MODE_MASK 0x000C /* AIFDAC_TDM_MODE - [3:2] */
389#define WM9081_AIFDAC_TDM_MODE_SHIFT 2 /* AIFDAC_TDM_MODE - [3:2] */
390#define WM9081_AIFDAC_TDM_MODE_WIDTH 2 /* AIFDAC_TDM_MODE - [3:2] */
391#define WM9081_DAC_COMP 0x0002 /* DAC_COMP */
392#define WM9081_DAC_COMP_MASK 0x0002 /* DAC_COMP */
393#define WM9081_DAC_COMP_SHIFT 1 /* DAC_COMP */
394#define WM9081_DAC_COMP_WIDTH 1 /* DAC_COMP */
395#define WM9081_DAC_COMPMODE 0x0001 /* DAC_COMPMODE */
396#define WM9081_DAC_COMPMODE_MASK 0x0001 /* DAC_COMPMODE */
397#define WM9081_DAC_COMPMODE_SHIFT 0 /* DAC_COMPMODE */
398#define WM9081_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */
399
400/*
401 * R23 (0x17) - Audio Interface 2
402 */
403#define WM9081_AIF_TRIS 0x0200 /* AIF_TRIS */
404#define WM9081_AIF_TRIS_MASK 0x0200 /* AIF_TRIS */
405#define WM9081_AIF_TRIS_SHIFT 9 /* AIF_TRIS */
406#define WM9081_AIF_TRIS_WIDTH 1 /* AIF_TRIS */
407#define WM9081_DAC_DAT_INV 0x0100 /* DAC_DAT_INV */
408#define WM9081_DAC_DAT_INV_MASK 0x0100 /* DAC_DAT_INV */
409#define WM9081_DAC_DAT_INV_SHIFT 8 /* DAC_DAT_INV */
410#define WM9081_DAC_DAT_INV_WIDTH 1 /* DAC_DAT_INV */
411#define WM9081_AIF_BCLK_INV 0x0080 /* AIF_BCLK_INV */
412#define WM9081_AIF_BCLK_INV_MASK 0x0080 /* AIF_BCLK_INV */
413#define WM9081_AIF_BCLK_INV_SHIFT 7 /* AIF_BCLK_INV */
414#define WM9081_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */
415#define WM9081_BCLK_DIR 0x0040 /* BCLK_DIR */
416#define WM9081_BCLK_DIR_MASK 0x0040 /* BCLK_DIR */
417#define WM9081_BCLK_DIR_SHIFT 6 /* BCLK_DIR */
418#define WM9081_BCLK_DIR_WIDTH 1 /* BCLK_DIR */
419#define WM9081_LRCLK_DIR 0x0020 /* LRCLK_DIR */
420#define WM9081_LRCLK_DIR_MASK 0x0020 /* LRCLK_DIR */
421#define WM9081_LRCLK_DIR_SHIFT 5 /* LRCLK_DIR */
422#define WM9081_LRCLK_DIR_WIDTH 1 /* LRCLK_DIR */
423#define WM9081_AIF_LRCLK_INV 0x0010 /* AIF_LRCLK_INV */
424#define WM9081_AIF_LRCLK_INV_MASK 0x0010 /* AIF_LRCLK_INV */
425#define WM9081_AIF_LRCLK_INV_SHIFT 4 /* AIF_LRCLK_INV */
426#define WM9081_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */
427#define WM9081_AIF_WL_MASK 0x000C /* AIF_WL - [3:2] */
428#define WM9081_AIF_WL_SHIFT 2 /* AIF_WL - [3:2] */
429#define WM9081_AIF_WL_WIDTH 2 /* AIF_WL - [3:2] */
430#define WM9081_AIF_FMT_MASK 0x0003 /* AIF_FMT - [1:0] */
431#define WM9081_AIF_FMT_SHIFT 0 /* AIF_FMT - [1:0] */
432#define WM9081_AIF_FMT_WIDTH 2 /* AIF_FMT - [1:0] */
433
434/*
435 * R24 (0x18) - Audio Interface 3
436 */
437#define WM9081_BCLK_DIV_MASK 0x001F /* BCLK_DIV - [4:0] */
438#define WM9081_BCLK_DIV_SHIFT 0 /* BCLK_DIV - [4:0] */
439#define WM9081_BCLK_DIV_WIDTH 5 /* BCLK_DIV - [4:0] */
440
441/*
442 * R25 (0x19) - Audio Interface 4
443 */
444#define WM9081_LRCLK_RATE_MASK 0x07FF /* LRCLK_RATE - [10:0] */
445#define WM9081_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [10:0] */
446#define WM9081_LRCLK_RATE_WIDTH 11 /* LRCLK_RATE - [10:0] */
447
448/*
449 * R26 (0x1A) - Interrupt Status
450 */
451#define WM9081_WSEQ_BUSY_EINT 0x0004 /* WSEQ_BUSY_EINT */
452#define WM9081_WSEQ_BUSY_EINT_MASK 0x0004 /* WSEQ_BUSY_EINT */
453#define WM9081_WSEQ_BUSY_EINT_SHIFT 2 /* WSEQ_BUSY_EINT */
454#define WM9081_WSEQ_BUSY_EINT_WIDTH 1 /* WSEQ_BUSY_EINT */
455#define WM9081_TSHUT_EINT 0x0001 /* TSHUT_EINT */
456#define WM9081_TSHUT_EINT_MASK 0x0001 /* TSHUT_EINT */
457#define WM9081_TSHUT_EINT_SHIFT 0 /* TSHUT_EINT */
458#define WM9081_TSHUT_EINT_WIDTH 1 /* TSHUT_EINT */
459
460/*
461 * R27 (0x1B) - Interrupt Status Mask
462 */
463#define WM9081_IM_WSEQ_BUSY_EINT 0x0004 /* IM_WSEQ_BUSY_EINT */
464#define WM9081_IM_WSEQ_BUSY_EINT_MASK 0x0004 /* IM_WSEQ_BUSY_EINT */
465#define WM9081_IM_WSEQ_BUSY_EINT_SHIFT 2 /* IM_WSEQ_BUSY_EINT */
466#define WM9081_IM_WSEQ_BUSY_EINT_WIDTH 1 /* IM_WSEQ_BUSY_EINT */
467#define WM9081_IM_TSHUT_EINT 0x0001 /* IM_TSHUT_EINT */
468#define WM9081_IM_TSHUT_EINT_MASK 0x0001 /* IM_TSHUT_EINT */
469#define WM9081_IM_TSHUT_EINT_SHIFT 0 /* IM_TSHUT_EINT */
470#define WM9081_IM_TSHUT_EINT_WIDTH 1 /* IM_TSHUT_EINT */
471
472/*
473 * R28 (0x1C) - Interrupt Polarity
474 */
475#define WM9081_TSHUT_INV 0x0001 /* TSHUT_INV */
476#define WM9081_TSHUT_INV_MASK 0x0001 /* TSHUT_INV */
477#define WM9081_TSHUT_INV_SHIFT 0 /* TSHUT_INV */
478#define WM9081_TSHUT_INV_WIDTH 1 /* TSHUT_INV */
479
480/*
481 * R29 (0x1D) - Interrupt Control
482 */
483#define WM9081_IRQ_POL 0x8000 /* IRQ_POL */
484#define WM9081_IRQ_POL_MASK 0x8000 /* IRQ_POL */
485#define WM9081_IRQ_POL_SHIFT 15 /* IRQ_POL */
486#define WM9081_IRQ_POL_WIDTH 1 /* IRQ_POL */
487#define WM9081_IRQ_OP_CTRL 0x0001 /* IRQ_OP_CTRL */
488#define WM9081_IRQ_OP_CTRL_MASK 0x0001 /* IRQ_OP_CTRL */
489#define WM9081_IRQ_OP_CTRL_SHIFT 0 /* IRQ_OP_CTRL */
490#define WM9081_IRQ_OP_CTRL_WIDTH 1 /* IRQ_OP_CTRL */
491
492/*
493 * R30 (0x1E) - DAC Digital 1
494 */
495#define WM9081_DAC_VOL_MASK 0x00FF /* DAC_VOL - [7:0] */
496#define WM9081_DAC_VOL_SHIFT 0 /* DAC_VOL - [7:0] */
497#define WM9081_DAC_VOL_WIDTH 8 /* DAC_VOL - [7:0] */
498
499/*
500 * R31 (0x1F) - DAC Digital 2
501 */
502#define WM9081_DAC_MUTERATE 0x0400 /* DAC_MUTERATE */
503#define WM9081_DAC_MUTERATE_MASK 0x0400 /* DAC_MUTERATE */
504#define WM9081_DAC_MUTERATE_SHIFT 10 /* DAC_MUTERATE */
505#define WM9081_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
506#define WM9081_DAC_MUTEMODE 0x0200 /* DAC_MUTEMODE */
507#define WM9081_DAC_MUTEMODE_MASK 0x0200 /* DAC_MUTEMODE */
508#define WM9081_DAC_MUTEMODE_SHIFT 9 /* DAC_MUTEMODE */
509#define WM9081_DAC_MUTEMODE_WIDTH 1 /* DAC_MUTEMODE */
510#define WM9081_DAC_MUTE 0x0008 /* DAC_MUTE */
511#define WM9081_DAC_MUTE_MASK 0x0008 /* DAC_MUTE */
512#define WM9081_DAC_MUTE_SHIFT 3 /* DAC_MUTE */
513#define WM9081_DAC_MUTE_WIDTH 1 /* DAC_MUTE */
514#define WM9081_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
515#define WM9081_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
516#define WM9081_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
517
518/*
519 * R32 (0x20) - DRC 1
520 */
521#define WM9081_DRC_ENA 0x8000 /* DRC_ENA */
522#define WM9081_DRC_ENA_MASK 0x8000 /* DRC_ENA */
523#define WM9081_DRC_ENA_SHIFT 15 /* DRC_ENA */
524#define WM9081_DRC_ENA_WIDTH 1 /* DRC_ENA */
525#define WM9081_DRC_STARTUP_GAIN_MASK 0x07C0 /* DRC_STARTUP_GAIN - [10:6] */
526#define WM9081_DRC_STARTUP_GAIN_SHIFT 6 /* DRC_STARTUP_GAIN - [10:6] */
527#define WM9081_DRC_STARTUP_GAIN_WIDTH 5 /* DRC_STARTUP_GAIN - [10:6] */
528#define WM9081_DRC_FF_DLY 0x0020 /* DRC_FF_DLY */
529#define WM9081_DRC_FF_DLY_MASK 0x0020 /* DRC_FF_DLY */
530#define WM9081_DRC_FF_DLY_SHIFT 5 /* DRC_FF_DLY */
531#define WM9081_DRC_FF_DLY_WIDTH 1 /* DRC_FF_DLY */
532#define WM9081_DRC_QR 0x0004 /* DRC_QR */
533#define WM9081_DRC_QR_MASK 0x0004 /* DRC_QR */
534#define WM9081_DRC_QR_SHIFT 2 /* DRC_QR */
535#define WM9081_DRC_QR_WIDTH 1 /* DRC_QR */
536#define WM9081_DRC_ANTICLIP 0x0002 /* DRC_ANTICLIP */
537#define WM9081_DRC_ANTICLIP_MASK 0x0002 /* DRC_ANTICLIP */
538#define WM9081_DRC_ANTICLIP_SHIFT 1 /* DRC_ANTICLIP */
539#define WM9081_DRC_ANTICLIP_WIDTH 1 /* DRC_ANTICLIP */
540
541/*
542 * R33 (0x21) - DRC 2
543 */
544#define WM9081_DRC_ATK_MASK 0xF000 /* DRC_ATK - [15:12] */
545#define WM9081_DRC_ATK_SHIFT 12 /* DRC_ATK - [15:12] */
546#define WM9081_DRC_ATK_WIDTH 4 /* DRC_ATK - [15:12] */
547#define WM9081_DRC_DCY_MASK 0x0F00 /* DRC_DCY - [11:8] */
548#define WM9081_DRC_DCY_SHIFT 8 /* DRC_DCY - [11:8] */
549#define WM9081_DRC_DCY_WIDTH 4 /* DRC_DCY - [11:8] */
550#define WM9081_DRC_QR_THR_MASK 0x00C0 /* DRC_QR_THR - [7:6] */
551#define WM9081_DRC_QR_THR_SHIFT 6 /* DRC_QR_THR - [7:6] */
552#define WM9081_DRC_QR_THR_WIDTH 2 /* DRC_QR_THR - [7:6] */
553#define WM9081_DRC_QR_DCY_MASK 0x0030 /* DRC_QR_DCY - [5:4] */
554#define WM9081_DRC_QR_DCY_SHIFT 4 /* DRC_QR_DCY - [5:4] */
555#define WM9081_DRC_QR_DCY_WIDTH 2 /* DRC_QR_DCY - [5:4] */
556#define WM9081_DRC_MINGAIN_MASK 0x000C /* DRC_MINGAIN - [3:2] */
557#define WM9081_DRC_MINGAIN_SHIFT 2 /* DRC_MINGAIN - [3:2] */
558#define WM9081_DRC_MINGAIN_WIDTH 2 /* DRC_MINGAIN - [3:2] */
559#define WM9081_DRC_MAXGAIN_MASK 0x0003 /* DRC_MAXGAIN - [1:0] */
560#define WM9081_DRC_MAXGAIN_SHIFT 0 /* DRC_MAXGAIN - [1:0] */
561#define WM9081_DRC_MAXGAIN_WIDTH 2 /* DRC_MAXGAIN - [1:0] */
562
563/*
564 * R34 (0x22) - DRC 3
565 */
566#define WM9081_DRC_HI_COMP_MASK 0x0038 /* DRC_HI_COMP - [5:3] */
567#define WM9081_DRC_HI_COMP_SHIFT 3 /* DRC_HI_COMP - [5:3] */
568#define WM9081_DRC_HI_COMP_WIDTH 3 /* DRC_HI_COMP - [5:3] */
569#define WM9081_DRC_LO_COMP_MASK 0x0007 /* DRC_LO_COMP - [2:0] */
570#define WM9081_DRC_LO_COMP_SHIFT 0 /* DRC_LO_COMP - [2:0] */
571#define WM9081_DRC_LO_COMP_WIDTH 3 /* DRC_LO_COMP - [2:0] */
572
573/*
574 * R35 (0x23) - DRC 4
575 */
576#define WM9081_DRC_KNEE_IP_MASK 0x07E0 /* DRC_KNEE_IP - [10:5] */
577#define WM9081_DRC_KNEE_IP_SHIFT 5 /* DRC_KNEE_IP - [10:5] */
578#define WM9081_DRC_KNEE_IP_WIDTH 6 /* DRC_KNEE_IP - [10:5] */
579#define WM9081_DRC_KNEE_OP_MASK 0x001F /* DRC_KNEE_OP - [4:0] */
580#define WM9081_DRC_KNEE_OP_SHIFT 0 /* DRC_KNEE_OP - [4:0] */
581#define WM9081_DRC_KNEE_OP_WIDTH 5 /* DRC_KNEE_OP - [4:0] */
582
583/*
584 * R38 (0x26) - Write Sequencer 1
585 */
586#define WM9081_WSEQ_ENA 0x8000 /* WSEQ_ENA */
587#define WM9081_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */
588#define WM9081_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */
589#define WM9081_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
590#define WM9081_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
591#define WM9081_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
592#define WM9081_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
593#define WM9081_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
594#define WM9081_WSEQ_START 0x0100 /* WSEQ_START */
595#define WM9081_WSEQ_START_MASK 0x0100 /* WSEQ_START */
596#define WM9081_WSEQ_START_SHIFT 8 /* WSEQ_START */
597#define WM9081_WSEQ_START_WIDTH 1 /* WSEQ_START */
598#define WM9081_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */
599#define WM9081_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */
600#define WM9081_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */
601
602/*
603 * R39 (0x27) - Write Sequencer 2
604 */
605#define WM9081_WSEQ_CURRENT_INDEX_MASK 0x07F0 /* WSEQ_CURRENT_INDEX - [10:4] */
606#define WM9081_WSEQ_CURRENT_INDEX_SHIFT 4 /* WSEQ_CURRENT_INDEX - [10:4] */
607#define WM9081_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [10:4] */
608#define WM9081_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
609#define WM9081_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
610#define WM9081_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
611#define WM9081_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
612
613/*
614 * R40 (0x28) - MW Slave 1
615 */
616#define WM9081_SPI_CFG 0x0020 /* SPI_CFG */
617#define WM9081_SPI_CFG_MASK 0x0020 /* SPI_CFG */
618#define WM9081_SPI_CFG_SHIFT 5 /* SPI_CFG */
619#define WM9081_SPI_CFG_WIDTH 1 /* SPI_CFG */
620#define WM9081_SPI_4WIRE 0x0010 /* SPI_4WIRE */
621#define WM9081_SPI_4WIRE_MASK 0x0010 /* SPI_4WIRE */
622#define WM9081_SPI_4WIRE_SHIFT 4 /* SPI_4WIRE */
623#define WM9081_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */
624#define WM9081_ARA_ENA 0x0008 /* ARA_ENA */
625#define WM9081_ARA_ENA_MASK 0x0008 /* ARA_ENA */
626#define WM9081_ARA_ENA_SHIFT 3 /* ARA_ENA */
627#define WM9081_ARA_ENA_WIDTH 1 /* ARA_ENA */
628#define WM9081_AUTO_INC 0x0002 /* AUTO_INC */
629#define WM9081_AUTO_INC_MASK 0x0002 /* AUTO_INC */
630#define WM9081_AUTO_INC_SHIFT 1 /* AUTO_INC */
631#define WM9081_AUTO_INC_WIDTH 1 /* AUTO_INC */
632
633/*
634 * R42 (0x2A) - EQ 1
635 */
636#define WM9081_EQ_B1_GAIN_MASK 0xF800 /* EQ_B1_GAIN - [15:11] */
637#define WM9081_EQ_B1_GAIN_SHIFT 11 /* EQ_B1_GAIN - [15:11] */
638#define WM9081_EQ_B1_GAIN_WIDTH 5 /* EQ_B1_GAIN - [15:11] */
639#define WM9081_EQ_B2_GAIN_MASK 0x07C0 /* EQ_B2_GAIN - [10:6] */
640#define WM9081_EQ_B2_GAIN_SHIFT 6 /* EQ_B2_GAIN - [10:6] */
641#define WM9081_EQ_B2_GAIN_WIDTH 5 /* EQ_B2_GAIN - [10:6] */
642#define WM9081_EQ_B4_GAIN_MASK 0x003E /* EQ_B4_GAIN - [5:1] */
643#define WM9081_EQ_B4_GAIN_SHIFT 1 /* EQ_B4_GAIN - [5:1] */
644#define WM9081_EQ_B4_GAIN_WIDTH 5 /* EQ_B4_GAIN - [5:1] */
645#define WM9081_EQ_ENA 0x0001 /* EQ_ENA */
646#define WM9081_EQ_ENA_MASK 0x0001 /* EQ_ENA */
647#define WM9081_EQ_ENA_SHIFT 0 /* EQ_ENA */
648#define WM9081_EQ_ENA_WIDTH 1 /* EQ_ENA */
649
650/*
651 * R43 (0x2B) - EQ 2
652 */
653#define WM9081_EQ_B3_GAIN_MASK 0xF800 /* EQ_B3_GAIN - [15:11] */
654#define WM9081_EQ_B3_GAIN_SHIFT 11 /* EQ_B3_GAIN - [15:11] */
655#define WM9081_EQ_B3_GAIN_WIDTH 5 /* EQ_B3_GAIN - [15:11] */
656#define WM9081_EQ_B5_GAIN_MASK 0x07C0 /* EQ_B5_GAIN - [10:6] */
657#define WM9081_EQ_B5_GAIN_SHIFT 6 /* EQ_B5_GAIN - [10:6] */
658#define WM9081_EQ_B5_GAIN_WIDTH 5 /* EQ_B5_GAIN - [10:6] */
659
660/*
661 * R44 (0x2C) - EQ 3
662 */
663#define WM9081_EQ_B1_A_MASK 0xFFFF /* EQ_B1_A - [15:0] */
664#define WM9081_EQ_B1_A_SHIFT 0 /* EQ_B1_A - [15:0] */
665#define WM9081_EQ_B1_A_WIDTH 16 /* EQ_B1_A - [15:0] */
666
667/*
668 * R45 (0x2D) - EQ 4
669 */
670#define WM9081_EQ_B1_B_MASK 0xFFFF /* EQ_B1_B - [15:0] */
671#define WM9081_EQ_B1_B_SHIFT 0 /* EQ_B1_B - [15:0] */
672#define WM9081_EQ_B1_B_WIDTH 16 /* EQ_B1_B - [15:0] */
673
674/*
675 * R46 (0x2E) - EQ 5
676 */
677#define WM9081_EQ_B1_PG_MASK 0xFFFF /* EQ_B1_PG - [15:0] */
678#define WM9081_EQ_B1_PG_SHIFT 0 /* EQ_B1_PG - [15:0] */
679#define WM9081_EQ_B1_PG_WIDTH 16 /* EQ_B1_PG - [15:0] */
680
681/*
682 * R47 (0x2F) - EQ 6
683 */
684#define WM9081_EQ_B2_A_MASK 0xFFFF /* EQ_B2_A - [15:0] */
685#define WM9081_EQ_B2_A_SHIFT 0 /* EQ_B2_A - [15:0] */
686#define WM9081_EQ_B2_A_WIDTH 16 /* EQ_B2_A - [15:0] */
687
688/*
689 * R48 (0x30) - EQ 7
690 */
691#define WM9081_EQ_B2_B_MASK 0xFFFF /* EQ_B2_B - [15:0] */
692#define WM9081_EQ_B2_B_SHIFT 0 /* EQ_B2_B - [15:0] */
693#define WM9081_EQ_B2_B_WIDTH 16 /* EQ_B2_B - [15:0] */
694
695/*
696 * R49 (0x31) - EQ 8
697 */
698#define WM9081_EQ_B2_C_MASK 0xFFFF /* EQ_B2_C - [15:0] */
699#define WM9081_EQ_B2_C_SHIFT 0 /* EQ_B2_C - [15:0] */
700#define WM9081_EQ_B2_C_WIDTH 16 /* EQ_B2_C - [15:0] */
701
702/*
703 * R50 (0x32) - EQ 9
704 */
705#define WM9081_EQ_B2_PG_MASK 0xFFFF /* EQ_B2_PG - [15:0] */
706#define WM9081_EQ_B2_PG_SHIFT 0 /* EQ_B2_PG - [15:0] */
707#define WM9081_EQ_B2_PG_WIDTH 16 /* EQ_B2_PG - [15:0] */
708
709/*
710 * R51 (0x33) - EQ 10
711 */
712#define WM9081_EQ_B4_A_MASK 0xFFFF /* EQ_B4_A - [15:0] */
713#define WM9081_EQ_B4_A_SHIFT 0 /* EQ_B4_A - [15:0] */
714#define WM9081_EQ_B4_A_WIDTH 16 /* EQ_B4_A - [15:0] */
715
716/*
717 * R52 (0x34) - EQ 11
718 */
719#define WM9081_EQ_B4_B_MASK 0xFFFF /* EQ_B4_B - [15:0] */
720#define WM9081_EQ_B4_B_SHIFT 0 /* EQ_B4_B - [15:0] */
721#define WM9081_EQ_B4_B_WIDTH 16 /* EQ_B4_B - [15:0] */
722
723/*
724 * R53 (0x35) - EQ 12
725 */
726#define WM9081_EQ_B4_C_MASK 0xFFFF /* EQ_B4_C - [15:0] */
727#define WM9081_EQ_B4_C_SHIFT 0 /* EQ_B4_C - [15:0] */
728#define WM9081_EQ_B4_C_WIDTH 16 /* EQ_B4_C - [15:0] */
729
730/*
731 * R54 (0x36) - EQ 13
732 */
733#define WM9081_EQ_B4_PG_MASK 0xFFFF /* EQ_B4_PG - [15:0] */
734#define WM9081_EQ_B4_PG_SHIFT 0 /* EQ_B4_PG - [15:0] */
735#define WM9081_EQ_B4_PG_WIDTH 16 /* EQ_B4_PG - [15:0] */
736
737/*
738 * R55 (0x37) - EQ 14
739 */
740#define WM9081_EQ_B3_A_MASK 0xFFFF /* EQ_B3_A - [15:0] */
741#define WM9081_EQ_B3_A_SHIFT 0 /* EQ_B3_A - [15:0] */
742#define WM9081_EQ_B3_A_WIDTH 16 /* EQ_B3_A - [15:0] */
743
744/*
745 * R56 (0x38) - EQ 15
746 */
747#define WM9081_EQ_B3_B_MASK 0xFFFF /* EQ_B3_B - [15:0] */
748#define WM9081_EQ_B3_B_SHIFT 0 /* EQ_B3_B - [15:0] */
749#define WM9081_EQ_B3_B_WIDTH 16 /* EQ_B3_B - [15:0] */
750
751/*
752 * R57 (0x39) - EQ 16
753 */
754#define WM9081_EQ_B3_C_MASK 0xFFFF /* EQ_B3_C - [15:0] */
755#define WM9081_EQ_B3_C_SHIFT 0 /* EQ_B3_C - [15:0] */
756#define WM9081_EQ_B3_C_WIDTH 16 /* EQ_B3_C - [15:0] */
757
758/*
759 * R58 (0x3A) - EQ 17
760 */
761#define WM9081_EQ_B3_PG_MASK 0xFFFF /* EQ_B3_PG - [15:0] */
762#define WM9081_EQ_B3_PG_SHIFT 0 /* EQ_B3_PG - [15:0] */
763#define WM9081_EQ_B3_PG_WIDTH 16 /* EQ_B3_PG - [15:0] */
764
765/*
766 * R59 (0x3B) - EQ 18
767 */
768#define WM9081_EQ_B5_A_MASK 0xFFFF /* EQ_B5_A - [15:0] */
769#define WM9081_EQ_B5_A_SHIFT 0 /* EQ_B5_A - [15:0] */
770#define WM9081_EQ_B5_A_WIDTH 16 /* EQ_B5_A - [15:0] */
771
772/*
773 * R60 (0x3C) - EQ 19
774 */
775#define WM9081_EQ_B5_B_MASK 0xFFFF /* EQ_B5_B - [15:0] */
776#define WM9081_EQ_B5_B_SHIFT 0 /* EQ_B5_B - [15:0] */
777#define WM9081_EQ_B5_B_WIDTH 16 /* EQ_B5_B - [15:0] */
778
779/*
780 * R61 (0x3D) - EQ 20
781 */
782#define WM9081_EQ_B5_PG_MASK 0xFFFF /* EQ_B5_PG - [15:0] */
783#define WM9081_EQ_B5_PG_SHIFT 0 /* EQ_B5_PG - [15:0] */
784#define WM9081_EQ_B5_PG_WIDTH 16 /* EQ_B5_PG - [15:0] */
785
786
787#endif
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index c2d1a7a18fa3..fa88b463e71f 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -282,14 +282,14 @@ struct snd_soc_dai wm9705_dai[] = {
282 .channels_min = 1, 282 .channels_min = 1,
283 .channels_max = 2, 283 .channels_max = 2,
284 .rates = WM9705_AC97_RATES, 284 .rates = WM9705_AC97_RATES,
285 .formats = SNDRV_PCM_FMTBIT_S16_LE, 285 .formats = SND_SOC_STD_AC97_FMTS,
286 }, 286 },
287 .capture = { 287 .capture = {
288 .stream_name = "HiFi Capture", 288 .stream_name = "HiFi Capture",
289 .channels_min = 1, 289 .channels_min = 1,
290 .channels_max = 2, 290 .channels_max = 2,
291 .rates = WM9705_AC97_RATES, 291 .rates = WM9705_AC97_RATES,
292 .formats = SNDRV_PCM_FMTBIT_S16_LE, 292 .formats = SND_SOC_STD_AC97_FMTS,
293 }, 293 },
294 .ops = &wm9705_dai_ops, 294 .ops = &wm9705_dai_ops,
295 }, 295 },
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 765cf1e7369e..1fd4e88f50cf 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -534,13 +534,13 @@ struct snd_soc_dai wm9712_dai[] = {
534 .channels_min = 1, 534 .channels_min = 1,
535 .channels_max = 2, 535 .channels_max = 2,
536 .rates = WM9712_AC97_RATES, 536 .rates = WM9712_AC97_RATES,
537 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 537 .formats = SND_SOC_STD_AC97_FMTS,},
538 .capture = { 538 .capture = {
539 .stream_name = "HiFi Capture", 539 .stream_name = "HiFi Capture",
540 .channels_min = 1, 540 .channels_min = 1,
541 .channels_max = 2, 541 .channels_max = 2,
542 .rates = WM9712_AC97_RATES, 542 .rates = WM9712_AC97_RATES,
543 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 543 .formats = SND_SOC_STD_AC97_FMTS,},
544 .ops = &wm9712_dai_ops_hifi, 544 .ops = &wm9712_dai_ops_hifi,
545}, 545},
546{ 546{
@@ -550,7 +550,7 @@ struct snd_soc_dai wm9712_dai[] = {
550 .channels_min = 1, 550 .channels_min = 1,
551 .channels_max = 1, 551 .channels_max = 1,
552 .rates = WM9712_AC97_RATES, 552 .rates = WM9712_AC97_RATES,
553 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 553 .formats = SND_SOC_STD_AC97_FMTS,},
554 .ops = &wm9712_dai_ops_aux, 554 .ops = &wm9712_dai_ops_aux,
555} 555}
556}; 556};
@@ -585,6 +585,8 @@ static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
585 } 585 }
586 586
587 soc_ac97_ops.reset(codec->ac97); 587 soc_ac97_ops.reset(codec->ac97);
588 if (soc_ac97_ops.warm_reset)
589 soc_ac97_ops.warm_reset(codec->ac97);
588 if (ac97_read(codec, 0) != wm9712_reg[0]) 590 if (ac97_read(codec, 0) != wm9712_reg[0])
589 goto err; 591 goto err;
590 return 0; 592 return 0;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 523bad077fa0..abed37acf787 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -189,6 +189,26 @@ SOC_SINGLE("3D Lower Cut-off Switch", AC97_REC_GAIN_MIC, 4, 1, 0),
189SOC_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1), 189SOC_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
190}; 190};
191 191
192static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
193 struct snd_kcontrol *kcontrol, int event)
194{
195 struct snd_soc_codec *codec = w->codec;
196 u16 status, rate;
197
198 BUG_ON(event != SND_SOC_DAPM_PRE_PMD);
199
200 /* Gracefully shut down the voice interface. */
201 status = ac97_read(codec, AC97_EXTENDED_MID) | 0x1000;
202 rate = ac97_read(codec, AC97_HANDSET_RATE) & 0xF0FF;
203 ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0200);
204 schedule_timeout_interruptible(msecs_to_jiffies(1));
205 ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0F00);
206 ac97_write(codec, AC97_EXTENDED_MID, status);
207
208 return 0;
209}
210
211
192/* We have to create a fake left and right HP mixers because 212/* We have to create a fake left and right HP mixers because
193 * the codec only has a single control that is shared by both channels. 213 * the codec only has a single control that is shared by both channels.
194 * This makes it impossible to determine the audio path using the current 214 * This makes it impossible to determine the audio path using the current
@@ -400,7 +420,8 @@ SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
400SND_SOC_DAPM_MIXER("HP Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), 420SND_SOC_DAPM_MIXER("HP Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
401SND_SOC_DAPM_MIXER("Line Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), 421SND_SOC_DAPM_MIXER("Line Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
402SND_SOC_DAPM_MIXER("Capture Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), 422SND_SOC_DAPM_MIXER("Capture Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
403SND_SOC_DAPM_DAC("Voice DAC", "Voice Playback", AC97_EXTENDED_MID, 12, 1), 423SND_SOC_DAPM_DAC_E("Voice DAC", "Voice Playback", AC97_EXTENDED_MID, 12, 1,
424 wm9713_voice_shutdown, SND_SOC_DAPM_PRE_PMD),
404SND_SOC_DAPM_DAC("Aux DAC", "Aux Playback", AC97_EXTENDED_MID, 11, 1), 425SND_SOC_DAPM_DAC("Aux DAC", "Aux Playback", AC97_EXTENDED_MID, 11, 1),
405SND_SOC_DAPM_PGA("Left ADC", AC97_EXTENDED_MID, 5, 1, NULL, 0), 426SND_SOC_DAPM_PGA("Left ADC", AC97_EXTENDED_MID, 5, 1, NULL, 0),
406SND_SOC_DAPM_PGA("Right ADC", AC97_EXTENDED_MID, 4, 1, NULL, 0), 427SND_SOC_DAPM_PGA("Right ADC", AC97_EXTENDED_MID, 4, 1, NULL, 0),
@@ -689,7 +710,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int source)
689 Ndiv = target / source; 710 Ndiv = target / source;
690 if ((Ndiv < 5) || (Ndiv > 12)) 711 if ((Ndiv < 5) || (Ndiv > 12))
691 printk(KERN_WARNING 712 printk(KERN_WARNING
692 "WM9713 PLL N value %d out of recommended range!\n", 713 "WM9713 PLL N value %u out of recommended range!\n",
693 Ndiv); 714 Ndiv);
694 715
695 pll_div->n = Ndiv; 716 pll_div->n = Ndiv;
@@ -936,21 +957,6 @@ static int wm9713_pcm_hw_params(struct snd_pcm_substream *substream,
936 return 0; 957 return 0;
937} 958}
938 959
939static void wm9713_voiceshutdown(struct snd_pcm_substream *substream,
940 struct snd_soc_dai *dai)
941{
942 struct snd_soc_codec *codec = dai->codec;
943 u16 status, rate;
944
945 /* Gracefully shut down the voice interface. */
946 status = ac97_read(codec, AC97_EXTENDED_STATUS) | 0x1000;
947 rate = ac97_read(codec, AC97_HANDSET_RATE) & 0xF0FF;
948 ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0200);
949 schedule_timeout_interruptible(msecs_to_jiffies(1));
950 ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0F00);
951 ac97_write(codec, AC97_EXTENDED_MID, status);
952}
953
954static int ac97_hifi_prepare(struct snd_pcm_substream *substream, 960static int ac97_hifi_prepare(struct snd_pcm_substream *substream,
955 struct snd_soc_dai *dai) 961 struct snd_soc_dai *dai)
956{ 962{
@@ -1019,7 +1025,6 @@ static struct snd_soc_dai_ops wm9713_dai_ops_aux = {
1019 1025
1020static struct snd_soc_dai_ops wm9713_dai_ops_voice = { 1026static struct snd_soc_dai_ops wm9713_dai_ops_voice = {
1021 .hw_params = wm9713_pcm_hw_params, 1027 .hw_params = wm9713_pcm_hw_params,
1022 .shutdown = wm9713_voiceshutdown,
1023 .set_clkdiv = wm9713_set_dai_clkdiv, 1028 .set_clkdiv = wm9713_set_dai_clkdiv,
1024 .set_pll = wm9713_set_dai_pll, 1029 .set_pll = wm9713_set_dai_pll,
1025 .set_fmt = wm9713_set_dai_fmt, 1030 .set_fmt = wm9713_set_dai_fmt,
@@ -1035,13 +1040,13 @@ struct snd_soc_dai wm9713_dai[] = {
1035 .channels_min = 1, 1040 .channels_min = 1,
1036 .channels_max = 2, 1041 .channels_max = 2,
1037 .rates = WM9713_RATES, 1042 .rates = WM9713_RATES,
1038 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 1043 .formats = SND_SOC_STD_AC97_FMTS,},
1039 .capture = { 1044 .capture = {
1040 .stream_name = "HiFi Capture", 1045 .stream_name = "HiFi Capture",
1041 .channels_min = 1, 1046 .channels_min = 1,
1042 .channels_max = 2, 1047 .channels_max = 2,
1043 .rates = WM9713_RATES, 1048 .rates = WM9713_RATES,
1044 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 1049 .formats = SND_SOC_STD_AC97_FMTS,},
1045 .ops = &wm9713_dai_ops_hifi, 1050 .ops = &wm9713_dai_ops_hifi,
1046 }, 1051 },
1047 { 1052 {
@@ -1051,7 +1056,7 @@ struct snd_soc_dai wm9713_dai[] = {
1051 .channels_min = 1, 1056 .channels_min = 1,
1052 .channels_max = 1, 1057 .channels_max = 1,
1053 .rates = WM9713_RATES, 1058 .rates = WM9713_RATES,
1054 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 1059 .formats = SND_SOC_STD_AC97_FMTS,},
1055 .ops = &wm9713_dai_ops_aux, 1060 .ops = &wm9713_dai_ops_aux,
1056 }, 1061 },
1057 { 1062 {
@@ -1069,6 +1074,7 @@ struct snd_soc_dai wm9713_dai[] = {
1069 .rates = WM9713_PCM_RATES, 1074 .rates = WM9713_PCM_RATES,
1070 .formats = WM9713_PCM_FORMATS,}, 1075 .formats = WM9713_PCM_FORMATS,},
1071 .ops = &wm9713_dai_ops_voice, 1076 .ops = &wm9713_dai_ops_voice,
1077 .symmetric_rates = 1,
1072 }, 1078 },
1073}; 1079};
1074EXPORT_SYMBOL_GPL(wm9713_dai); 1080EXPORT_SYMBOL_GPL(wm9713_dai);
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 9fc908283371..5dbebf82249c 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -1,5 +1,8 @@
1config SND_SOC_OF_SIMPLE 1config SND_SOC_OF_SIMPLE
2 tristate 2 tristate
3
4config SND_MPC52xx_DMA
5 tristate
3 6
4# ASoC platform support for the Freescale MPC8610 SOC. This compiles drivers 7# ASoC platform support for the Freescale MPC8610 SOC. This compiles drivers
5# for the SSI and the Elo DMA controller. You will still need to select 8# for the SSI and the Elo DMA controller. You will still need to select
@@ -22,7 +25,34 @@ config SND_SOC_MPC8610_HPCD
22config SND_SOC_MPC5200_I2S 25config SND_SOC_MPC5200_I2S
23 tristate "Freescale MPC5200 PSC in I2S mode driver" 26 tristate "Freescale MPC5200 PSC in I2S mode driver"
24 depends on PPC_MPC52xx && PPC_BESTCOMM 27 depends on PPC_MPC52xx && PPC_BESTCOMM
25 select SND_SOC_OF_SIMPLE 28 select SND_MPC52xx_DMA
26 select PPC_BESTCOMM_GEN_BD 29 select PPC_BESTCOMM_GEN_BD
27 help 30 help
28 Say Y here to support the MPC5200 PSCs in I2S mode. 31 Say Y here to support the MPC5200 PSCs in I2S mode.
32
33config SND_SOC_MPC5200_AC97
34 tristate "Freescale MPC5200 PSC in AC97 mode driver"
35 depends on PPC_MPC52xx && PPC_BESTCOMM
36 select AC97_BUS
37 select SND_MPC52xx_DMA
38 select PPC_BESTCOMM_GEN_BD
39 help
40 Say Y here to support the MPC5200 PSCs in AC97 mode.
41
42config SND_MPC52xx_SOC_PCM030
43 tristate "SoC AC97 Audio support for Phytec pcm030 and WM9712"
44 depends on PPC_MPC5200_SIMPLE && BROKEN
45 select SND_SOC_MPC5200_AC97
46 select SND_SOC_WM9712
47 help
48 Say Y if you want to add support for sound on the Phytec pcm030
49 baseboard.
50
51config SND_MPC52xx_SOC_EFIKA
52 tristate "SoC AC97 Audio support for bbplan Efika and STAC9766"
53 depends on PPC_EFIKA && BROKEN
54 select SND_SOC_MPC5200_AC97
55 select SND_SOC_STAC9766
56 help
57 Say Y if you want to add support for sound on the Efika.
58
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index f85134c86387..a83a73967ec6 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -10,5 +10,12 @@ snd-soc-fsl-ssi-objs := fsl_ssi.o
10snd-soc-fsl-dma-objs := fsl_dma.o 10snd-soc-fsl-dma-objs := fsl_dma.o
11obj-$(CONFIG_SND_SOC_MPC8610) += snd-soc-fsl-ssi.o snd-soc-fsl-dma.o 11obj-$(CONFIG_SND_SOC_MPC8610) += snd-soc-fsl-ssi.o snd-soc-fsl-dma.o
12 12
13# MPC5200 Platform Support
14obj-$(CONFIG_SND_MPC52xx_DMA) += mpc5200_dma.o
13obj-$(CONFIG_SND_SOC_MPC5200_I2S) += mpc5200_psc_i2s.o 15obj-$(CONFIG_SND_SOC_MPC5200_I2S) += mpc5200_psc_i2s.o
16obj-$(CONFIG_SND_SOC_MPC5200_AC97) += mpc5200_psc_ac97.o
17
18# MPC5200 Machine Support
19obj-$(CONFIG_SND_MPC52xx_SOC_PCM030) += pcm030-audio-fabric.o
20obj-$(CONFIG_SND_MPC52xx_SOC_EFIKA) += efika-audio-fabric.o
14 21
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c
new file mode 100644
index 000000000000..85b0e7569504
--- /dev/null
+++ b/sound/soc/fsl/efika-audio-fabric.c
@@ -0,0 +1,90 @@
1/*
2 * Efika driver for the PSC of the Freescale MPC52xx
3 * configured as AC97 interface
4 *
5 * Copyright 2008 Jon Smirl, Digispeaker
6 * Author: Jon Smirl <jonsmirl@gmail.com>
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/device.h>
17#include <linux/delay.h>
18#include <linux/of_device.h>
19#include <linux/of_platform.h>
20#include <linux/dma-mapping.h>
21
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/initval.h>
26#include <sound/soc.h>
27#include <sound/soc-of-simple.h>
28
29#include "mpc5200_dma.h"
30#include "mpc5200_psc_ac97.h"
31#include "../codecs/stac9766.h"
32
33static struct snd_soc_device device;
34static struct snd_soc_card card;
35
36static struct snd_soc_dai_link efika_fabric_dai[] = {
37{
38 .name = "AC97",
39 .stream_name = "AC97 Analog",
40 .codec_dai = &stac9766_dai[STAC9766_DAI_AC97_ANALOG],
41 .cpu_dai = &psc_ac97_dai[MPC5200_AC97_NORMAL],
42},
43{
44 .name = "AC97",
45 .stream_name = "AC97 IEC958",
46 .codec_dai = &stac9766_dai[STAC9766_DAI_AC97_DIGITAL],
47 .cpu_dai = &psc_ac97_dai[MPC5200_AC97_SPDIF],
48},
49};
50
51static __init int efika_fabric_init(void)
52{
53 struct platform_device *pdev;
54 int rc;
55
56 if (!machine_is_compatible("bplan,efika"))
57 return -ENODEV;
58
59 card.platform = &mpc5200_audio_dma_platform;
60 card.name = "Efika";
61 card.dai_link = efika_fabric_dai;
62 card.num_links = ARRAY_SIZE(efika_fabric_dai);
63
64 device.card = &card;
65 device.codec_dev = &soc_codec_dev_stac9766;
66
67 pdev = platform_device_alloc("soc-audio", 1);
68 if (!pdev) {
69 pr_err("efika_fabric_init: platform_device_alloc() failed\n");
70 return -ENODEV;
71 }
72
73 platform_set_drvdata(pdev, &device);
74 device.dev = &pdev->dev;
75
76 rc = platform_device_add(pdev);
77 if (rc) {
78 pr_err("efika_fabric_init: platform_device_add() failed\n");
79 return -ENODEV;
80 }
81 return 0;
82}
83
84module_init(efika_fabric_init);
85
86
87MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
88MODULE_DESCRIPTION(DRV_NAME ": mpc5200 Efika fabric driver");
89MODULE_LICENSE("GPL");
90
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 3711d8454d96..93f0f38a32c9 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -375,18 +375,14 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
375 struct snd_pcm_runtime *first_runtime = 375 struct snd_pcm_runtime *first_runtime =
376 ssi_private->first_stream->runtime; 376 ssi_private->first_stream->runtime;
377 377
378 if (!first_runtime->rate || !first_runtime->sample_bits) { 378 if (!first_runtime->sample_bits) {
379 dev_err(substream->pcm->card->dev, 379 dev_err(substream->pcm->card->dev,
380 "set sample rate and size in %s stream first\n", 380 "set sample size in %s stream first\n",
381 substream->stream == SNDRV_PCM_STREAM_PLAYBACK 381 substream->stream == SNDRV_PCM_STREAM_PLAYBACK
382 ? "capture" : "playback"); 382 ? "capture" : "playback");
383 return -EAGAIN; 383 return -EAGAIN;
384 } 384 }
385 385
386 snd_pcm_hw_constraint_minmax(substream->runtime,
387 SNDRV_PCM_HW_PARAM_RATE,
388 first_runtime->rate, first_runtime->rate);
389
390 /* If we're in synchronous mode, then we need to constrain 386 /* If we're in synchronous mode, then we need to constrain
391 * the sample size as well. We don't support independent sample 387 * the sample size as well. We don't support independent sample
392 * rates in asynchronous mode. 388 * rates in asynchronous mode.
@@ -674,7 +670,7 @@ struct snd_soc_dai *fsl_ssi_create_dai(struct fsl_ssi_info *ssi_info)
674 ssi_private->dev = ssi_info->dev; 670 ssi_private->dev = ssi_info->dev;
675 ssi_private->asynchronous = ssi_info->asynchronous; 671 ssi_private->asynchronous = ssi_info->asynchronous;
676 672
677 ssi_private->dev->driver_data = fsl_ssi_dai; 673 dev_set_drvdata(ssi_private->dev, fsl_ssi_dai);
678 674
679 /* Initialize the the device_attribute structure */ 675 /* Initialize the the device_attribute structure */
680 dev_attr->attr.name = "ssi-stats"; 676 dev_attr->attr.name = "ssi-stats";
@@ -693,6 +689,7 @@ struct snd_soc_dai *fsl_ssi_create_dai(struct fsl_ssi_info *ssi_info)
693 fsl_ssi_dai->name = ssi_private->name; 689 fsl_ssi_dai->name = ssi_private->name;
694 fsl_ssi_dai->id = ssi_info->id; 690 fsl_ssi_dai->id = ssi_info->id;
695 fsl_ssi_dai->dev = ssi_info->dev; 691 fsl_ssi_dai->dev = ssi_info->dev;
692 fsl_ssi_dai->symmetric_rates = 1;
696 693
697 ret = snd_soc_register_dai(fsl_ssi_dai); 694 ret = snd_soc_register_dai(fsl_ssi_dai);
698 if (ret != 0) { 695 if (ret != 0) {
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
new file mode 100644
index 000000000000..efec33a1c5bd
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -0,0 +1,564 @@
1/*
2 * Freescale MPC5200 PSC DMA
3 * ALSA SoC Platform driver
4 *
5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
6 * Copyright (C) 2009 Jon Smirl, Digispeaker
7 */
8
9#include <linux/module.h>
10#include <linux/of_device.h>
11
12#include <sound/soc.h>
13
14#include <sysdev/bestcomm/bestcomm.h>
15#include <sysdev/bestcomm/gen_bd.h>
16#include <asm/mpc52xx_psc.h>
17
18#include "mpc5200_dma.h"
19
20/*
21 * Interrupt handlers
22 */
23static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma)
24{
25 struct psc_dma *psc_dma = _psc_dma;
26 struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
27 u16 isr;
28
29 isr = in_be16(&regs->mpc52xx_psc_isr);
30
31 /* Playback underrun error */
32 if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
33 psc_dma->stats.underrun_count++;
34
35 /* Capture overrun error */
36 if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
37 psc_dma->stats.overrun_count++;
38
39 out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);
40
41 return IRQ_HANDLED;
42}
43
44/**
45 * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer
46 * @s: pointer to stream private data structure
47 *
48 * Enqueues another audio period buffer into the bestcomm queue.
49 *
50 * Note: The routine must only be called when there is space available in
51 * the queue. Otherwise the enqueue will fail and the audio ring buffer
52 * will get out of sync
53 */
54static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
55{
56 struct bcom_bd *bd;
57
58 /* Prepare and enqueue the next buffer descriptor */
59 bd = bcom_prepare_next_buffer(s->bcom_task);
60 bd->status = s->period_bytes;
61 bd->data[0] = s->period_next_pt;
62 bcom_submit_next_buffer(s->bcom_task, NULL);
63
64 /* Update for next period */
65 s->period_next_pt += s->period_bytes;
66 if (s->period_next_pt >= s->period_end)
67 s->period_next_pt = s->period_start;
68}
69
70static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
71{
72 while (s->appl_ptr < s->runtime->control->appl_ptr) {
73
74 if (bcom_queue_full(s->bcom_task))
75 return;
76
77 s->appl_ptr += s->period_size;
78
79 psc_dma_bcom_enqueue_next_buffer(s);
80 }
81}
82
83/* Bestcomm DMA irq handler */
84static irqreturn_t psc_dma_bcom_irq_tx(int irq, void *_psc_dma_stream)
85{
86 struct psc_dma_stream *s = _psc_dma_stream;
87
88 spin_lock(&s->psc_dma->lock);
89 /* For each finished period, dequeue the completed period buffer
90 * and enqueue a new one in it's place. */
91 while (bcom_buffer_done(s->bcom_task)) {
92 bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
93
94 s->period_current_pt += s->period_bytes;
95 if (s->period_current_pt >= s->period_end)
96 s->period_current_pt = s->period_start;
97 }
98 psc_dma_bcom_enqueue_tx(s);
99 spin_unlock(&s->psc_dma->lock);
100
101 /* If the stream is active, then also inform the PCM middle layer
102 * of the period finished event. */
103 if (s->active)
104 snd_pcm_period_elapsed(s->stream);
105
106 return IRQ_HANDLED;
107}
108
109static irqreturn_t psc_dma_bcom_irq_rx(int irq, void *_psc_dma_stream)
110{
111 struct psc_dma_stream *s = _psc_dma_stream;
112
113 spin_lock(&s->psc_dma->lock);
114 /* For each finished period, dequeue the completed period buffer
115 * and enqueue a new one in it's place. */
116 while (bcom_buffer_done(s->bcom_task)) {
117 bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
118
119 s->period_current_pt += s->period_bytes;
120 if (s->period_current_pt >= s->period_end)
121 s->period_current_pt = s->period_start;
122
123 psc_dma_bcom_enqueue_next_buffer(s);
124 }
125 spin_unlock(&s->psc_dma->lock);
126
127 /* If the stream is active, then also inform the PCM middle layer
128 * of the period finished event. */
129 if (s->active)
130 snd_pcm_period_elapsed(s->stream);
131
132 return IRQ_HANDLED;
133}
134
135static int psc_dma_hw_free(struct snd_pcm_substream *substream)
136{
137 snd_pcm_set_runtime_buffer(substream, NULL);
138 return 0;
139}
140
141/**
142 * psc_dma_trigger: start and stop the DMA transfer.
143 *
144 * This function is called by ALSA to start, stop, pause, and resume the DMA
145 * transfer of data.
146 */
147static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
148{
149 struct snd_soc_pcm_runtime *rtd = substream->private_data;
150 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
151 struct snd_pcm_runtime *runtime = substream->runtime;
152 struct psc_dma_stream *s;
153 struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
154 u16 imr;
155 unsigned long flags;
156 int i;
157
158 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
159 s = &psc_dma->capture;
160 else
161 s = &psc_dma->playback;
162
163 dev_dbg(psc_dma->dev, "psc_dma_trigger(substream=%p, cmd=%i)"
164 " stream_id=%i\n",
165 substream, cmd, substream->pstr->stream);
166
167 switch (cmd) {
168 case SNDRV_PCM_TRIGGER_START:
169 s->period_bytes = frames_to_bytes(runtime,
170 runtime->period_size);
171 s->period_start = virt_to_phys(runtime->dma_area);
172 s->period_end = s->period_start +
173 (s->period_bytes * runtime->periods);
174 s->period_next_pt = s->period_start;
175 s->period_current_pt = s->period_start;
176 s->period_size = runtime->period_size;
177 s->active = 1;
178
179 /* track appl_ptr so that we have a better chance of detecting
180 * end of stream and not over running it.
181 */
182 s->runtime = runtime;
183 s->appl_ptr = s->runtime->control->appl_ptr -
184 (runtime->period_size * runtime->periods);
185
186 /* Fill up the bestcomm bd queue and enable DMA.
187 * This will begin filling the PSC's fifo.
188 */
189 spin_lock_irqsave(&psc_dma->lock, flags);
190
191 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
192 bcom_gen_bd_rx_reset(s->bcom_task);
193 for (i = 0; i < runtime->periods; i++)
194 if (!bcom_queue_full(s->bcom_task))
195 psc_dma_bcom_enqueue_next_buffer(s);
196 } else {
197 bcom_gen_bd_tx_reset(s->bcom_task);
198 psc_dma_bcom_enqueue_tx(s);
199 }
200
201 bcom_enable(s->bcom_task);
202 spin_unlock_irqrestore(&psc_dma->lock, flags);
203
204 out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);
205
206 break;
207
208 case SNDRV_PCM_TRIGGER_STOP:
209 s->active = 0;
210
211 spin_lock_irqsave(&psc_dma->lock, flags);
212 bcom_disable(s->bcom_task);
213 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
214 bcom_gen_bd_rx_reset(s->bcom_task);
215 else
216 bcom_gen_bd_tx_reset(s->bcom_task);
217 spin_unlock_irqrestore(&psc_dma->lock, flags);
218
219 break;
220
221 default:
222 dev_dbg(psc_dma->dev, "invalid command\n");
223 return -EINVAL;
224 }
225
226 /* Update interrupt enable settings */
227 imr = 0;
228 if (psc_dma->playback.active)
229 imr |= MPC52xx_PSC_IMR_TXEMP;
230 if (psc_dma->capture.active)
231 imr |= MPC52xx_PSC_IMR_ORERR;
232 out_be16(&regs->isr_imr.imr, psc_dma->imr | imr);
233
234 return 0;
235}
236
237
238/* ---------------------------------------------------------------------
239 * The PSC DMA 'ASoC platform' driver
240 *
241 * Can be referenced by an 'ASoC machine' driver
242 * This driver only deals with the audio bus; it doesn't have any
243 * interaction with the attached codec
244 */
245
246static const struct snd_pcm_hardware psc_dma_hardware = {
247 .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
248 SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
249 SNDRV_PCM_INFO_BATCH,
250 .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
251 SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
252 .rate_min = 8000,
253 .rate_max = 48000,
254 .channels_min = 1,
255 .channels_max = 2,
256 .period_bytes_max = 1024 * 1024,
257 .period_bytes_min = 32,
258 .periods_min = 2,
259 .periods_max = 256,
260 .buffer_bytes_max = 2 * 1024 * 1024,
261 .fifo_size = 512,
262};
263
264static int psc_dma_open(struct snd_pcm_substream *substream)
265{
266 struct snd_pcm_runtime *runtime = substream->runtime;
267 struct snd_soc_pcm_runtime *rtd = substream->private_data;
268 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
269 struct psc_dma_stream *s;
270 int rc;
271
272 dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream);
273
274 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
275 s = &psc_dma->capture;
276 else
277 s = &psc_dma->playback;
278
279 snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware);
280
281 rc = snd_pcm_hw_constraint_integer(runtime,
282 SNDRV_PCM_HW_PARAM_PERIODS);
283 if (rc < 0) {
284 dev_err(substream->pcm->card->dev, "invalid buffer size\n");
285 return rc;
286 }
287
288 s->stream = substream;
289 return 0;
290}
291
292static int psc_dma_close(struct snd_pcm_substream *substream)
293{
294 struct snd_soc_pcm_runtime *rtd = substream->private_data;
295 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
296 struct psc_dma_stream *s;
297
298 dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);
299
300 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
301 s = &psc_dma->capture;
302 else
303 s = &psc_dma->playback;
304
305 if (!psc_dma->playback.active &&
306 !psc_dma->capture.active) {
307
308 /* Disable all interrupts and reset the PSC */
309 out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
310 out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */
311 }
312 s->stream = NULL;
313 return 0;
314}
315
316static snd_pcm_uframes_t
317psc_dma_pointer(struct snd_pcm_substream *substream)
318{
319 struct snd_soc_pcm_runtime *rtd = substream->private_data;
320 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
321 struct psc_dma_stream *s;
322 dma_addr_t count;
323
324 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
325 s = &psc_dma->capture;
326 else
327 s = &psc_dma->playback;
328
329 count = s->period_current_pt - s->period_start;
330
331 return bytes_to_frames(substream->runtime, count);
332}
333
334static int
335psc_dma_hw_params(struct snd_pcm_substream *substream,
336 struct snd_pcm_hw_params *params)
337{
338 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
339
340 return 0;
341}
342
/* PCM operations handed to the ASoC core via the platform definition
 * below.  hw_free and trigger are defined earlier in this file; ioctl
 * uses the generic ALSA library implementation. */
static struct snd_pcm_ops psc_dma_ops = {
	.open = psc_dma_open,
	.close = psc_dma_close,
	.hw_free = psc_dma_hw_free,
	.ioctl = snd_pcm_lib_ioctl,
	.pointer = psc_dma_pointer,
	.trigger = psc_dma_trigger,
	.hw_params = psc_dma_hw_params,
};
352
353static u64 psc_dma_dmamask = 0xffffffff;
354static int psc_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
355 struct snd_pcm *pcm)
356{
357 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
358 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
359 size_t size = psc_dma_hardware.buffer_bytes_max;
360 int rc = 0;
361
362 dev_dbg(rtd->socdev->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
363 card, dai, pcm);
364
365 if (!card->dev->dma_mask)
366 card->dev->dma_mask = &psc_dma_dmamask;
367 if (!card->dev->coherent_dma_mask)
368 card->dev->coherent_dma_mask = 0xffffffff;
369
370 if (pcm->streams[0].substream) {
371 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
372 size, &pcm->streams[0].substream->dma_buffer);
373 if (rc)
374 goto playback_alloc_err;
375 }
376
377 if (pcm->streams[1].substream) {
378 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
379 size, &pcm->streams[1].substream->dma_buffer);
380 if (rc)
381 goto capture_alloc_err;
382 }
383
384 if (rtd->socdev->card->codec->ac97)
385 rtd->socdev->card->codec->ac97->private_data = psc_dma;
386
387 return 0;
388
389 capture_alloc_err:
390 if (pcm->streams[0].substream)
391 snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
392
393 playback_alloc_err:
394 dev_err(card->dev, "Cannot allocate buffer(s)\n");
395
396 return -ENOMEM;
397}
398
399static void psc_dma_free(struct snd_pcm *pcm)
400{
401 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
402 struct snd_pcm_substream *substream;
403 int stream;
404
405 dev_dbg(rtd->socdev->dev, "psc_dma_free(pcm=%p)\n", pcm);
406
407 for (stream = 0; stream < 2; stream++) {
408 substream = pcm->streams[stream].substream;
409 if (substream) {
410 snd_dma_free_pages(&substream->dma_buffer);
411 substream->dma_buffer.area = NULL;
412 substream->dma_buffer.addr = 0;
413 }
414 }
415}
416
/* ASoC platform definition shared by the AC97 and I2S PSC drivers;
 * registered in mpc5200_audio_dma_create() below. */
struct snd_soc_platform mpc5200_audio_dma_platform = {
	.name		= "mpc5200-psc-audio",
	.pcm_ops	= &psc_dma_ops,
	.pcm_new	= &psc_dma_new,
	.pcm_free	= &psc_dma_free,
};
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_platform);
424
425int mpc5200_audio_dma_create(struct of_device *op)
426{
427 phys_addr_t fifo;
428 struct psc_dma *psc_dma;
429 struct resource res;
430 int size, irq, rc;
431 const __be32 *prop;
432 void __iomem *regs;
433
434 /* Fetch the registers and IRQ of the PSC */
435 irq = irq_of_parse_and_map(op->node, 0);
436 if (of_address_to_resource(op->node, 0, &res)) {
437 dev_err(&op->dev, "Missing reg property\n");
438 return -ENODEV;
439 }
440 regs = ioremap(res.start, 1 + res.end - res.start);
441 if (!regs) {
442 dev_err(&op->dev, "Could not map registers\n");
443 return -ENODEV;
444 }
445
446 /* Allocate and initialize the driver private data */
447 psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL);
448 if (!psc_dma) {
449 iounmap(regs);
450 return -ENOMEM;
451 }
452
453 /* Get the PSC ID */
454 prop = of_get_property(op->node, "cell-index", &size);
455 if (!prop || size < sizeof *prop)
456 return -ENODEV;
457
458 spin_lock_init(&psc_dma->lock);
459 psc_dma->id = be32_to_cpu(*prop);
460 psc_dma->irq = irq;
461 psc_dma->psc_regs = regs;
462 psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs;
463 psc_dma->dev = &op->dev;
464 psc_dma->playback.psc_dma = psc_dma;
465 psc_dma->capture.psc_dma = psc_dma;
466 snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id);
467
468 /* Find the address of the fifo data registers and setup the
469 * DMA tasks */
470 fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
471 psc_dma->capture.bcom_task =
472 bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512);
473 psc_dma->playback.bcom_task =
474 bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo);
475 if (!psc_dma->capture.bcom_task ||
476 !psc_dma->playback.bcom_task) {
477 dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
478 iounmap(regs);
479 kfree(psc_dma);
480 return -ENODEV;
481 }
482
483 /* Disable all interrupts and reset the PSC */
484 out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
485 /* reset receiver */
486 out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX);
487 /* reset transmitter */
488 out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX);
489 /* reset error */
490 out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT);
491 /* reset mode */
492 out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1);
493
494 /* Set up mode register;
495 * First write: RxRdy (FIFO Alarm) generates rx FIFO irq
496 * Second write: register Normal mode for non loopback
497 */
498 out_8(&psc_dma->psc_regs->mode, 0);
499 out_8(&psc_dma->psc_regs->mode, 0);
500
501 /* Set the TX and RX fifo alarm thresholds */
502 out_be16(&psc_dma->fifo_regs->rfalarm, 0x100);
503 out_8(&psc_dma->fifo_regs->rfcntl, 0x4);
504 out_be16(&psc_dma->fifo_regs->tfalarm, 0x100);
505 out_8(&psc_dma->fifo_regs->tfcntl, 0x7);
506
507 /* Lookup the IRQ numbers */
508 psc_dma->playback.irq =
509 bcom_get_task_irq(psc_dma->playback.bcom_task);
510 psc_dma->capture.irq =
511 bcom_get_task_irq(psc_dma->capture.bcom_task);
512
513 rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED,
514 "psc-dma-status", psc_dma);
515 rc |= request_irq(psc_dma->capture.irq,
516 &psc_dma_bcom_irq_rx, IRQF_SHARED,
517 "psc-dma-capture", &psc_dma->capture);
518 rc |= request_irq(psc_dma->playback.irq,
519 &psc_dma_bcom_irq_tx, IRQF_SHARED,
520 "psc-dma-playback", &psc_dma->playback);
521 if (rc) {
522 free_irq(psc_dma->irq, psc_dma);
523 free_irq(psc_dma->capture.irq,
524 &psc_dma->capture);
525 free_irq(psc_dma->playback.irq,
526 &psc_dma->playback);
527 return -ENODEV;
528 }
529
530 /* Save what we've done so it can be found again later */
531 dev_set_drvdata(&op->dev, psc_dma);
532
533 /* Tell the ASoC OF helpers about it */
534 return snd_soc_register_platform(&mpc5200_audio_dma_platform);
535}
536EXPORT_SYMBOL_GPL(mpc5200_audio_dma_create);
537
/**
 * mpc5200_audio_dma_destroy - undo mpc5200_audio_dma_create().
 * @op: OF device previously passed to mpc5200_audio_dma_create()
 *
 * Unregisters the ASoC platform, releases the bestcomm tasks and all
 * IRQ handlers, unmaps the PSC registers, and frees the driver
 * context.  Teardown order mirrors the reverse of creation; do not
 * reorder the steps below.
 */
int mpc5200_audio_dma_destroy(struct of_device *op)
{
	struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);

	dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n");

	snd_soc_unregister_platform(&mpc5200_audio_dma_platform);

	/* Release the DMA tasks before their associated IRQs. */
	bcom_gen_bd_rx_release(psc_dma->capture.bcom_task);
	bcom_gen_bd_tx_release(psc_dma->playback.bcom_task);

	/* Release irqs */
	free_irq(psc_dma->irq, psc_dma);
	free_irq(psc_dma->capture.irq, &psc_dma->capture);
	free_irq(psc_dma->playback.irq, &psc_dma->playback);

	iounmap(psc_dma->psc_regs);
	kfree(psc_dma);
	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(mpc5200_audio_dma_destroy);
561
562MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
563MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver");
564MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/mpc5200_dma.h b/sound/soc/fsl/mpc5200_dma.h
new file mode 100644
index 000000000000..2000803f06a7
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_dma.h
@@ -0,0 +1,80 @@
1/*
2 * Freescale MPC5200 Audio DMA driver
3 */
4
5#ifndef __SOUND_SOC_FSL_MPC5200_DMA_H__
6#define __SOUND_SOC_FSL_MPC5200_DMA_H__
7
8#define PSC_STREAM_NAME_LEN 32
9
/**
 * struct psc_dma_stream - data specific to a single stream direction
 *                         (playback or capture)
 * @runtime: ALSA runtime of the open substream, if any
 * @appl_ptr: cached application pointer, in frames
 * @active: flag indicating if the stream is active
 * @psc_dma: pointer back to parent psc_dma data structure
 * @bcom_task: bestcomm task structure
 * @irq: irq number for bestcomm task
 * @stream: open substream bound to this direction; NULL when closed
 * @period_start: physical address of start of DMA region
 * @period_end: physical address of end of DMA region
 * @period_next_pt: physical address of next DMA buffer to enqueue
 * @period_current_pt: physical address of the period currently in flight
 * @period_bytes: size of DMA period in bytes
 * @period_size: period size (presumably in frames; confirm against users)
 */
struct psc_dma_stream {
	struct snd_pcm_runtime *runtime;
	snd_pcm_uframes_t appl_ptr;

	int active;
	struct psc_dma *psc_dma;
	struct bcom_task *bcom_task;
	int irq;
	struct snd_pcm_substream *stream;
	dma_addr_t period_start;
	dma_addr_t period_end;
	dma_addr_t period_next_pt;
	dma_addr_t period_current_pt;
	int period_bytes;
	int period_size;
};
37
/**
 * struct psc_dma - private driver data shared by the DMA, AC97 and I2S
 *                  components of the driver
 * @name: short name for this device ("PSC0", "PSC1", etc)
 * @psc_regs: pointer to the PSC's registers
 * @fifo_regs: pointer to the PSC's FIFO registers
 * @irq: IRQ of this PSC
 * @dev: struct device pointer
 * @lock: spinlock (NOTE(review): exact protection scope not visible in
 *        this header; see users in mpc5200_dma.c)
 * @sicr: Base value used in serial interface control register; mode is ORed
 *        with this value.
 * @sysclk: system clock frequency handed in by the machine driver
 * @imr: value written to the PSC interrupt mask register
 * @id: PSC cell index taken from the device tree
 * @slots: currently-enabled AC97 slot mask (used by the AC97 component)
 * @playback: Playback stream context data
 * @capture: Capture stream context data
 * @stats: FIFO overrun/underrun counters
 */
struct psc_dma {
	char name[32];
	struct mpc52xx_psc __iomem *psc_regs;
	struct mpc52xx_psc_fifo __iomem *fifo_regs;
	unsigned int irq;
	struct device *dev;
	spinlock_t lock;
	u32 sicr;
	uint sysclk;
	int imr;
	int id;
	unsigned int slots;

	/* per-stream data */
	struct psc_dma_stream playback;
	struct psc_dma_stream capture;

	/* Statistics */
	struct {
		unsigned long overrun_count;
		unsigned long underrun_count;
	} stats;
};
74
75int mpc5200_audio_dma_create(struct of_device *op);
76int mpc5200_audio_dma_destroy(struct of_device *op);
77
78extern struct snd_soc_platform mpc5200_audio_dma_platform;
79
80#endif /* __SOUND_SOC_FSL_MPC5200_DMA_H__ */
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
new file mode 100644
index 000000000000..794a247b3eb5
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -0,0 +1,329 @@
1/*
2 * linux/sound/mpc5200-ac97.c -- AC97 support for the Freescale MPC52xx chip.
3 *
4 * Copyright (C) 2009 Jon Smirl, Digispeaker
5 * Author: Jon Smirl <jonsmirl@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/of_device.h>
14#include <linux/of_platform.h>
15
16#include <sound/pcm.h>
17#include <sound/pcm_params.h>
18#include <sound/soc.h>
19
20#include <asm/time.h>
21#include <asm/delay.h>
22#include <asm/mpc52xx_psc.h>
23
24#include "mpc5200_dma.h"
25#include "mpc5200_psc_ac97.h"
26
27#define DRV_NAME "mpc5200-psc-ac97"
28
29/* ALSA only supports a single AC97 device so static is recommend here */
30static struct psc_dma *psc_dma;
31
/*
 * psc_ac97_read - read an AC97 codec register through the PSC.
 *
 * Waits for the command slot to become free, issues the read command,
 * waits for the data-valid flag, and checks that the response echoes
 * the requested register number.
 *
 * NOTE(review): the failure paths return -ENODEV, but the return type
 * is unsigned short, so callers actually receive the truncated value
 * 0xffed — confirm whether the AC97 core treats that as an error.
 */
static unsigned short psc_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	int status;
	unsigned int val;

	/* Wait for command send status zero = ready */
	status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) &
				MPC52xx_PSC_SR_CMDSEND), 100, 0);
	if (status == 0) {
		pr_err("timeout on ac97 bus (rdy)\n");
		return -ENODEV;
	}
	/* Send the read: bit 31 marks a read, bits 30..24 carry the
	 * register number */
	out_be32(&psc_dma->psc_regs->ac97_cmd, (1<<31) | ((reg & 0x7f) << 24));

	/* Wait for the answer */
	status = spin_event_timeout((in_be16(&psc_dma->psc_regs->sr_csr.status) &
				MPC52xx_PSC_SR_DATA_VAL), 100, 0);
	if (status == 0) {
		pr_err("timeout on ac97 read (val) %x\n",
				in_be16(&psc_dma->psc_regs->sr_csr.status));
		return -ENODEV;
	}
	/* Get the data; the response echoes the register in bits 30..24
	 * and carries the value in bits 23..8 */
	val = in_be32(&psc_dma->psc_regs->ac97_data);
	if (((val >> 24) & 0x7f) != reg) {
		pr_err("reg echo error on ac97 read\n");
		return -ENODEV;
	}
	val = (val >> 8) & 0xffff;

	return (unsigned short) val;
}
65
/*
 * psc_ac97_write - write an AC97 codec register through the PSC.
 *
 * Waits for the command slot to become free, then issues the write
 * command (register in bits 30..24, value in bits 23..8).  A timeout
 * is logged but otherwise silently dropped — the AC97 write API has
 * no way to report failure.
 */
static void psc_ac97_write(struct snd_ac97 *ac97,
				unsigned short reg, unsigned short val)
{
	int status;

	/* Wait for command status zero = ready */
	status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) &
				MPC52xx_PSC_SR_CMDSEND), 100, 0);
	if (status == 0) {
		pr_err("timeout on ac97 bus (write)\n");
		return;
	}
	/* Write data */
	out_be32(&psc_dma->psc_regs->ac97_cmd,
			((reg & 0x7f) << 24) | (val << 8));
}
82
/*
 * psc_ac97_warm_reset - pulse the AC97 warm-reset (AWR) bit in SICR.
 *
 * NOTE(review): the 3us pulse width is taken from the original code;
 * confirm against the AC97 spec / MPC5200B manual if changing it.
 */
static void psc_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;

	out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_AWR);
	udelay(3);
	out_be32(&regs->sicr, psc_dma->sicr);
}
91
/*
 * psc_ac97_cold_reset - cold-reset the codec via the PSC output pins,
 * then issue a warm reset to bring the AC97 link back up.
 *
 * NOTE(review): op1/op0 writes toggle the reset output; the 10us/50us
 * delays come from the original code — confirm against the codec's
 * reset timing requirements before changing.
 */
static void psc_ac97_cold_reset(struct snd_ac97 *ac97)
{
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;

	/* Do a cold reset */
	out_8(&regs->op1, MPC52xx_PSC_OP_RES);
	udelay(10);
	out_8(&regs->op0, MPC52xx_PSC_OP_RES);
	udelay(50);
	psc_ac97_warm_reset(ac97);
}
103
/* AC97 bus operations consumed by the generic ASoC AC97 codec support;
 * the exported symbol name "soc_ac97_ops" is what the codec drivers
 * link against. */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read		= psc_ac97_read,
	.write		= psc_ac97_write,
	.reset		= psc_ac97_cold_reset,
	.warm_reset	= psc_ac97_warm_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
111
112static int psc_ac97_hw_analog_params(struct snd_pcm_substream *substream,
113 struct snd_pcm_hw_params *params,
114 struct snd_soc_dai *cpu_dai)
115{
116 struct psc_dma *psc_dma = cpu_dai->private_data;
117
118 dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i"
119 " periods=%i buffer_size=%i buffer_bytes=%i channels=%i"
120 " rate=%i format=%i\n",
121 __func__, substream, params_period_size(params),
122 params_period_bytes(params), params_periods(params),
123 params_buffer_size(params), params_buffer_bytes(params),
124 params_channels(params), params_rate(params),
125 params_format(params));
126
127
128 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
129 if (params_channels(params) == 1)
130 psc_dma->slots |= 0x00000100;
131 else
132 psc_dma->slots |= 0x00000300;
133 } else {
134 if (params_channels(params) == 1)
135 psc_dma->slots |= 0x01000000;
136 else
137 psc_dma->slots |= 0x03000000;
138 }
139 out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots);
140
141 return 0;
142}
143
144static int psc_ac97_hw_digital_params(struct snd_pcm_substream *substream,
145 struct snd_pcm_hw_params *params,
146 struct snd_soc_dai *cpu_dai)
147{
148 struct psc_dma *psc_dma = cpu_dai->private_data;
149
150 if (params_channels(params) == 1)
151 out_be32(&psc_dma->psc_regs->ac97_slots, 0x01000000);
152 else
153 out_be32(&psc_dma->psc_regs->ac97_slots, 0x03000000);
154
155 return 0;
156}
157
158static int psc_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
159 struct snd_soc_dai *dai)
160{
161 struct snd_soc_pcm_runtime *rtd = substream->private_data;
162 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
163
164 switch (cmd) {
165 case SNDRV_PCM_TRIGGER_STOP:
166 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
167 psc_dma->slots &= 0xFFFF0000;
168 else
169 psc_dma->slots &= 0x0000FFFF;
170
171 out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots);
172 break;
173 }
174 return 0;
175}
176
/*
 * psc_ac97_probe - DAI probe callback: enable the PSC transmitter and
 * receiver so the AC97 link starts running.
 */
static int psc_ac97_probe(struct platform_device *pdev,
					struct snd_soc_dai *cpu_dai)
{
	struct psc_dma *psc_dma = cpu_dai->private_data;
	struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;

	/* Go */
	out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
	return 0;
}
187
188/* ---------------------------------------------------------------------
189 * ALSA SoC Bindings
190 *
191 * - Digital Audio Interface (DAI) template
192 * - create/destroy dai hooks
193 */
194
195/**
196 * psc_ac97_dai_template: template CPU Digital Audio Interface
197 */
/* Ops for the analog AC97 DAI: slot setup plus slot teardown on stop. */
static struct snd_soc_dai_ops psc_ac97_analog_ops = {
	.hw_params	= psc_ac97_hw_analog_params,
	.trigger	= psc_ac97_trigger,
};

/* Ops for the S/PDIF DAI: slot setup only. */
static struct snd_soc_dai_ops psc_ac97_digital_ops = {
	.hw_params	= psc_ac97_hw_digital_params,
};
206
/* Two CPU DAIs exposed by the PSC in AC97 mode: the analog AC97 link
 * (index MPC5200_AC97_NORMAL) and S/PDIF passthrough (index
 * MPC5200_AC97_SPDIF); see mpc5200_psc_ac97.h. */
struct snd_soc_dai psc_ac97_dai[] = {
{
	.name   = "AC97",
	.ac97_control = 1,
	.probe	= psc_ac97_probe,
	.playback = {
		.channels_min   = 1,
		.channels_max   = 6,
		.rates          = SNDRV_PCM_RATE_8000_48000,
		.formats        = SNDRV_PCM_FMTBIT_S32_BE,
	},
	.capture = {
		.channels_min   = 1,
		.channels_max   = 2,
		.rates          = SNDRV_PCM_RATE_8000_48000,
		.formats        = SNDRV_PCM_FMTBIT_S32_BE,
	},
	.ops = &psc_ac97_analog_ops,
},
{
	.name   = "SPDIF",
	.ac97_control = 1,
	.playback = {
		.channels_min   = 1,
		.channels_max   = 2,
		.rates          = SNDRV_PCM_RATE_32000 | \
			SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
		.formats        = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE,
	},
	.ops = &psc_ac97_digital_ops,
} };
EXPORT_SYMBOL_GPL(psc_ac97_dai);
239
240
241
242/* ---------------------------------------------------------------------
243 * OF platform bus binding code:
244 * - Probe/remove operations
245 * - OF device match table
246 */
247static int __devinit psc_ac97_of_probe(struct of_device *op,
248 const struct of_device_id *match)
249{
250 int rc, i;
251 struct snd_ac97 ac97;
252 struct mpc52xx_psc __iomem *regs;
253
254 rc = mpc5200_audio_dma_create(op);
255 if (rc != 0)
256 return rc;
257
258 for (i = 0; i < ARRAY_SIZE(psc_ac97_dai); i++)
259 psc_ac97_dai[i].dev = &op->dev;
260
261 rc = snd_soc_register_dais(psc_ac97_dai, ARRAY_SIZE(psc_ac97_dai));
262 if (rc != 0) {
263 dev_err(&op->dev, "Failed to register DAI\n");
264 return rc;
265 }
266
267 psc_dma = dev_get_drvdata(&op->dev);
268 regs = psc_dma->psc_regs;
269 ac97.private_data = psc_dma;
270
271 for (i = 0; i < ARRAY_SIZE(psc_ac97_dai); i++)
272 psc_ac97_dai[i].private_data = psc_dma;
273
274 psc_dma->imr = 0;
275 out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
276
277 /* Configure the serial interface mode to AC97 */
278 psc_dma->sicr = MPC52xx_PSC_SICR_SIM_AC97 | MPC52xx_PSC_SICR_ENAC97;
279 out_be32(&regs->sicr, psc_dma->sicr);
280
281 /* No slots active */
282 out_be32(&regs->ac97_slots, 0x00000000);
283
284 return 0;
285}
286
/* OF remove: all teardown lives in mpc5200_audio_dma_destroy(). */
static int __devexit psc_ac97_of_remove(struct of_device *op)
{
	return mpc5200_audio_dma_destroy(op);
}
291
/* Match table for of_platform binding; covers both the MPC5200 and
 * MPC5200B variants of the PSC. */
static struct of_device_id psc_ac97_match[] __devinitdata = {
	{ .compatible = "fsl,mpc5200-psc-ac97", },
	{ .compatible = "fsl,mpc5200b-psc-ac97", },
	{}
};
MODULE_DEVICE_TABLE(of, psc_ac97_match);

/* of_platform driver binding the probe/remove hooks above. */
static struct of_platform_driver psc_ac97_driver = {
	.match_table = psc_ac97_match,
	.probe = psc_ac97_of_probe,
	.remove = __devexit_p(psc_ac97_of_remove),
	.driver = {
		.name = "mpc5200-psc-ac97",
		.owner = THIS_MODULE,
	},
};
309
310/* ---------------------------------------------------------------------
311 * Module setup and teardown; simply register the of_platform driver
312 * for the PSC in AC97 mode.
313 */
/* Module init: register the of_platform driver for AC97-mode PSCs. */
static int __init psc_ac97_init(void)
{
	return of_register_platform_driver(&psc_ac97_driver);
}
module_init(psc_ac97_init);

/* Module exit: unregister the of_platform driver. */
static void __exit psc_ac97_exit(void)
{
	of_unregister_platform_driver(&psc_ac97_driver);
}
module_exit(psc_ac97_exit);
325
326MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
327MODULE_DESCRIPTION("mpc5200 AC97 module");
328MODULE_LICENSE("GPL");
329
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.h b/sound/soc/fsl/mpc5200_psc_ac97.h
new file mode 100644
index 000000000000..4bc18c35c369
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_psc_ac97.h
@@ -0,0 +1,15 @@
1/*
2 * Freescale MPC5200 PSC in AC97 mode
3 * ALSA SoC Digital Audio Interface (DAI) driver
4 *
5 */
6
7#ifndef __SOUND_SOC_FSL_MPC52xx_PSC_AC97_H__
8#define __SOUND_SOC_FSL_MPC52xx_PSC_AC97_H__
9
10extern struct snd_soc_dai psc_ac97_dai[];
11
12#define MPC5200_AC97_NORMAL 0
13#define MPC5200_AC97_SPDIF 1
14
15#endif /* __SOUND_SOC_FSL_MPC52xx_PSC_AC97_H__ */
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 1111c710118a..ce8de90fb94a 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -3,31 +3,21 @@
3 * ALSA SoC Digital Audio Interface (DAI) driver 3 * ALSA SoC Digital Audio Interface (DAI) driver
4 * 4 *
5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
6 * Copyright (C) 2009 Jon Smirl, Digispeaker
6 */ 7 */
7 8
8#include <linux/init.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/interrupt.h>
11#include <linux/device.h>
12#include <linux/delay.h>
13#include <linux/of_device.h> 10#include <linux/of_device.h>
14#include <linux/of_platform.h> 11#include <linux/of_platform.h>
15#include <linux/dma-mapping.h>
16 12
17#include <sound/core.h>
18#include <sound/pcm.h> 13#include <sound/pcm.h>
19#include <sound/pcm_params.h> 14#include <sound/pcm_params.h>
20#include <sound/initval.h>
21#include <sound/soc.h> 15#include <sound/soc.h>
22#include <sound/soc-of-simple.h>
23 16
24#include <sysdev/bestcomm/bestcomm.h>
25#include <sysdev/bestcomm/gen_bd.h>
26#include <asm/mpc52xx_psc.h> 17#include <asm/mpc52xx_psc.h>
27 18
28MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); 19#include "mpc5200_psc_i2s.h"
29MODULE_DESCRIPTION("Freescale MPC5200 PSC in I2S mode ASoC Driver"); 20#include "mpc5200_dma.h"
30MODULE_LICENSE("GPL");
31 21
32/** 22/**
33 * PSC_I2S_RATES: sample rates supported by the I2S 23 * PSC_I2S_RATES: sample rates supported by the I2S
@@ -44,191 +34,17 @@ MODULE_LICENSE("GPL");
44 * PSC_I2S_FORMATS: audio formats supported by the PSC I2S mode 34 * PSC_I2S_FORMATS: audio formats supported by the PSC I2S mode
45 */ 35 */
46#define PSC_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ 36#define PSC_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
47 SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S24_BE | \ 37 SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE)
48 SNDRV_PCM_FMTBIT_S32_BE)
49
50/**
51 * psc_i2s_stream - Data specific to a single stream (playback or capture)
52 * @active: flag indicating if the stream is active
53 * @psc_i2s: pointer back to parent psc_i2s data structure
54 * @bcom_task: bestcomm task structure
55 * @irq: irq number for bestcomm task
56 * @period_start: physical address of start of DMA region
57 * @period_end: physical address of end of DMA region
58 * @period_next_pt: physical address of next DMA buffer to enqueue
59 * @period_bytes: size of DMA period in bytes
60 */
61struct psc_i2s_stream {
62 int active;
63 struct psc_i2s *psc_i2s;
64 struct bcom_task *bcom_task;
65 int irq;
66 struct snd_pcm_substream *stream;
67 dma_addr_t period_start;
68 dma_addr_t period_end;
69 dma_addr_t period_next_pt;
70 dma_addr_t period_current_pt;
71 int period_bytes;
72};
73
74/**
75 * psc_i2s - Private driver data
76 * @name: short name for this device ("PSC0", "PSC1", etc)
77 * @psc_regs: pointer to the PSC's registers
78 * @fifo_regs: pointer to the PSC's FIFO registers
79 * @irq: IRQ of this PSC
80 * @dev: struct device pointer
81 * @dai: the CPU DAI for this device
82 * @sicr: Base value used in serial interface control register; mode is ORed
83 * with this value.
84 * @playback: Playback stream context data
85 * @capture: Capture stream context data
86 */
87struct psc_i2s {
88 char name[32];
89 struct mpc52xx_psc __iomem *psc_regs;
90 struct mpc52xx_psc_fifo __iomem *fifo_regs;
91 unsigned int irq;
92 struct device *dev;
93 struct snd_soc_dai dai;
94 spinlock_t lock;
95 u32 sicr;
96
97 /* per-stream data */
98 struct psc_i2s_stream playback;
99 struct psc_i2s_stream capture;
100
101 /* Statistics */
102 struct {
103 int overrun_count;
104 int underrun_count;
105 } stats;
106};
107
108/*
109 * Interrupt handlers
110 */
111static irqreturn_t psc_i2s_status_irq(int irq, void *_psc_i2s)
112{
113 struct psc_i2s *psc_i2s = _psc_i2s;
114 struct mpc52xx_psc __iomem *regs = psc_i2s->psc_regs;
115 u16 isr;
116
117 isr = in_be16(&regs->mpc52xx_psc_isr);
118
119 /* Playback underrun error */
120 if (psc_i2s->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP))
121 psc_i2s->stats.underrun_count++;
122
123 /* Capture overrun error */
124 if (psc_i2s->capture.active && (isr & MPC52xx_PSC_IMR_ORERR))
125 psc_i2s->stats.overrun_count++;
126
127 out_8(&regs->command, 4 << 4); /* reset the error status */
128
129 return IRQ_HANDLED;
130}
131
132/**
133 * psc_i2s_bcom_enqueue_next_buffer - Enqueue another audio buffer
134 * @s: pointer to stream private data structure
135 *
136 * Enqueues another audio period buffer into the bestcomm queue.
137 *
138 * Note: The routine must only be called when there is space available in
139 * the queue. Otherwise the enqueue will fail and the audio ring buffer
140 * will get out of sync
141 */
142static void psc_i2s_bcom_enqueue_next_buffer(struct psc_i2s_stream *s)
143{
144 struct bcom_bd *bd;
145
146 /* Prepare and enqueue the next buffer descriptor */
147 bd = bcom_prepare_next_buffer(s->bcom_task);
148 bd->status = s->period_bytes;
149 bd->data[0] = s->period_next_pt;
150 bcom_submit_next_buffer(s->bcom_task, NULL);
151
152 /* Update for next period */
153 s->period_next_pt += s->period_bytes;
154 if (s->period_next_pt >= s->period_end)
155 s->period_next_pt = s->period_start;
156}
157
158/* Bestcomm DMA irq handler */
159static irqreturn_t psc_i2s_bcom_irq(int irq, void *_psc_i2s_stream)
160{
161 struct psc_i2s_stream *s = _psc_i2s_stream;
162
163 /* For each finished period, dequeue the completed period buffer
164 * and enqueue a new one in it's place. */
165 while (bcom_buffer_done(s->bcom_task)) {
166 bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
167 s->period_current_pt += s->period_bytes;
168 if (s->period_current_pt >= s->period_end)
169 s->period_current_pt = s->period_start;
170 psc_i2s_bcom_enqueue_next_buffer(s);
171 bcom_enable(s->bcom_task);
172 }
173
174 /* If the stream is active, then also inform the PCM middle layer
175 * of the period finished event. */
176 if (s->active)
177 snd_pcm_period_elapsed(s->stream);
178
179 return IRQ_HANDLED;
180}
181
182/**
183 * psc_i2s_startup: create a new substream
184 *
185 * This is the first function called when a stream is opened.
186 *
187 * If this is the first stream open, then grab the IRQ and program most of
188 * the PSC registers.
189 */
190static int psc_i2s_startup(struct snd_pcm_substream *substream,
191 struct snd_soc_dai *dai)
192{
193 struct snd_soc_pcm_runtime *rtd = substream->private_data;
194 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
195 int rc;
196
197 dev_dbg(psc_i2s->dev, "psc_i2s_startup(substream=%p)\n", substream);
198
199 if (!psc_i2s->playback.active &&
200 !psc_i2s->capture.active) {
201 /* Setup the IRQs */
202 rc = request_irq(psc_i2s->irq, &psc_i2s_status_irq, IRQF_SHARED,
203 "psc-i2s-status", psc_i2s);
204 rc |= request_irq(psc_i2s->capture.irq,
205 &psc_i2s_bcom_irq, IRQF_SHARED,
206 "psc-i2s-capture", &psc_i2s->capture);
207 rc |= request_irq(psc_i2s->playback.irq,
208 &psc_i2s_bcom_irq, IRQF_SHARED,
209 "psc-i2s-playback", &psc_i2s->playback);
210 if (rc) {
211 free_irq(psc_i2s->irq, psc_i2s);
212 free_irq(psc_i2s->capture.irq,
213 &psc_i2s->capture);
214 free_irq(psc_i2s->playback.irq,
215 &psc_i2s->playback);
216 return -ENODEV;
217 }
218 }
219
220 return 0;
221}
222 38
223static int psc_i2s_hw_params(struct snd_pcm_substream *substream, 39static int psc_i2s_hw_params(struct snd_pcm_substream *substream,
224 struct snd_pcm_hw_params *params, 40 struct snd_pcm_hw_params *params,
225 struct snd_soc_dai *dai) 41 struct snd_soc_dai *dai)
226{ 42{
227 struct snd_soc_pcm_runtime *rtd = substream->private_data; 43 struct snd_soc_pcm_runtime *rtd = substream->private_data;
228 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data; 44 struct psc_dma *psc_dma = rtd->dai->cpu_dai->private_data;
229 u32 mode; 45 u32 mode;
230 46
231 dev_dbg(psc_i2s->dev, "%s(substream=%p) p_size=%i p_bytes=%i" 47 dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i"
232 " periods=%i buffer_size=%i buffer_bytes=%i\n", 48 " periods=%i buffer_size=%i buffer_bytes=%i\n",
233 __func__, substream, params_period_size(params), 49 __func__, substream, params_period_size(params),
234 params_period_bytes(params), params_periods(params), 50 params_period_bytes(params), params_periods(params),
@@ -248,175 +64,15 @@ static int psc_i2s_hw_params(struct snd_pcm_substream *substream,
248 mode = MPC52xx_PSC_SICR_SIM_CODEC_32; 64 mode = MPC52xx_PSC_SICR_SIM_CODEC_32;
249 break; 65 break;
250 default: 66 default:
251 dev_dbg(psc_i2s->dev, "invalid format\n"); 67 dev_dbg(psc_dma->dev, "invalid format\n");
252 return -EINVAL;
253 }
254 out_be32(&psc_i2s->psc_regs->sicr, psc_i2s->sicr | mode);
255
256 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
257
258 return 0;
259}
260
261static int psc_i2s_hw_free(struct snd_pcm_substream *substream,
262 struct snd_soc_dai *dai)
263{
264 snd_pcm_set_runtime_buffer(substream, NULL);
265 return 0;
266}
267
268/**
269 * psc_i2s_trigger: start and stop the DMA transfer.
270 *
271 * This function is called by ALSA to start, stop, pause, and resume the DMA
272 * transfer of data.
273 */
274static int psc_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
275 struct snd_soc_dai *dai)
276{
277 struct snd_soc_pcm_runtime *rtd = substream->private_data;
278 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
279 struct snd_pcm_runtime *runtime = substream->runtime;
280 struct psc_i2s_stream *s;
281 struct mpc52xx_psc __iomem *regs = psc_i2s->psc_regs;
282 u16 imr;
283 u8 psc_cmd;
284 unsigned long flags;
285
286 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
287 s = &psc_i2s->capture;
288 else
289 s = &psc_i2s->playback;
290
291 dev_dbg(psc_i2s->dev, "psc_i2s_trigger(substream=%p, cmd=%i)"
292 " stream_id=%i\n",
293 substream, cmd, substream->pstr->stream);
294
295 switch (cmd) {
296 case SNDRV_PCM_TRIGGER_START:
297 s->period_bytes = frames_to_bytes(runtime,
298 runtime->period_size);
299 s->period_start = virt_to_phys(runtime->dma_area);
300 s->period_end = s->period_start +
301 (s->period_bytes * runtime->periods);
302 s->period_next_pt = s->period_start;
303 s->period_current_pt = s->period_start;
304 s->active = 1;
305
306 /* First; reset everything */
307 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
308 out_8(&regs->command, MPC52xx_PSC_RST_RX);
309 out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);
310 } else {
311 out_8(&regs->command, MPC52xx_PSC_RST_TX);
312 out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT);
313 }
314
315 /* Next, fill up the bestcomm bd queue and enable DMA.
316 * This will begin filling the PSC's fifo. */
317 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
318 bcom_gen_bd_rx_reset(s->bcom_task);
319 else
320 bcom_gen_bd_tx_reset(s->bcom_task);
321 while (!bcom_queue_full(s->bcom_task))
322 psc_i2s_bcom_enqueue_next_buffer(s);
323 bcom_enable(s->bcom_task);
324
325 /* Due to errata in the i2s mode; need to line up enabling
326 * the transmitter with a transition on the frame sync
327 * line */
328
329 spin_lock_irqsave(&psc_i2s->lock, flags);
330 /* first make sure it is low */
331 while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) != 0)
332 ;
333 /* then wait for the transition to high */
334 while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) == 0)
335 ;
336 /* Finally, enable the PSC.
337 * Receiver must always be enabled; even when we only want
338 * transmit. (see 15.3.2.3 of MPC5200B User's Guide) */
339 psc_cmd = MPC52xx_PSC_RX_ENABLE;
340 if (substream->pstr->stream == SNDRV_PCM_STREAM_PLAYBACK)
341 psc_cmd |= MPC52xx_PSC_TX_ENABLE;
342 out_8(&regs->command, psc_cmd);
343 spin_unlock_irqrestore(&psc_i2s->lock, flags);
344
345 break;
346
347 case SNDRV_PCM_TRIGGER_STOP:
348 /* Turn off the PSC */
349 s->active = 0;
350 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) {
351 if (!psc_i2s->playback.active) {
352 out_8(&regs->command, 2 << 4); /* reset rx */
353 out_8(&regs->command, 3 << 4); /* reset tx */
354 out_8(&regs->command, 4 << 4); /* reset err */
355 }
356 } else {
357 out_8(&regs->command, 3 << 4); /* reset tx */
358 out_8(&regs->command, 4 << 4); /* reset err */
359 if (!psc_i2s->capture.active)
360 out_8(&regs->command, 2 << 4); /* reset rx */
361 }
362
363 bcom_disable(s->bcom_task);
364 while (!bcom_queue_empty(s->bcom_task))
365 bcom_retrieve_buffer(s->bcom_task, NULL, NULL);
366
367 break;
368
369 default:
370 dev_dbg(psc_i2s->dev, "invalid command\n");
371 return -EINVAL; 68 return -EINVAL;
372 } 69 }
373 70 out_be32(&psc_dma->psc_regs->sicr, psc_dma->sicr | mode);
374 /* Update interrupt enable settings */
375 imr = 0;
376 if (psc_i2s->playback.active)
377 imr |= MPC52xx_PSC_IMR_TXEMP;
378 if (psc_i2s->capture.active)
379 imr |= MPC52xx_PSC_IMR_ORERR;
380 out_be16(&regs->isr_imr.imr, imr);
381 71
382 return 0; 72 return 0;
383} 73}
384 74
385/** 75/**
386 * psc_i2s_shutdown: shutdown the data transfer on a stream
387 *
388 * Shutdown the PSC if there are no other substreams open.
389 */
390static void psc_i2s_shutdown(struct snd_pcm_substream *substream,
391 struct snd_soc_dai *dai)
392{
393 struct snd_soc_pcm_runtime *rtd = substream->private_data;
394 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
395
396 dev_dbg(psc_i2s->dev, "psc_i2s_shutdown(substream=%p)\n", substream);
397
398 /*
399 * If this is the last active substream, disable the PSC and release
400 * the IRQ.
401 */
402 if (!psc_i2s->playback.active &&
403 !psc_i2s->capture.active) {
404
405 /* Disable all interrupts and reset the PSC */
406 out_be16(&psc_i2s->psc_regs->isr_imr.imr, 0);
407 out_8(&psc_i2s->psc_regs->command, 3 << 4); /* reset tx */
408 out_8(&psc_i2s->psc_regs->command, 2 << 4); /* reset rx */
409 out_8(&psc_i2s->psc_regs->command, 1 << 4); /* reset mode */
410 out_8(&psc_i2s->psc_regs->command, 4 << 4); /* reset error */
411
412 /* Release irqs */
413 free_irq(psc_i2s->irq, psc_i2s);
414 free_irq(psc_i2s->capture.irq, &psc_i2s->capture);
415 free_irq(psc_i2s->playback.irq, &psc_i2s->playback);
416 }
417}
418
419/**
420 * psc_i2s_set_sysclk: set the clock frequency and direction 76 * psc_i2s_set_sysclk: set the clock frequency and direction
421 * 77 *
422 * This function is called by the machine driver to tell us what the clock 78 * This function is called by the machine driver to tell us what the clock
@@ -433,8 +89,8 @@ static void psc_i2s_shutdown(struct snd_pcm_substream *substream,
433static int psc_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, 89static int psc_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
434 int clk_id, unsigned int freq, int dir) 90 int clk_id, unsigned int freq, int dir)
435{ 91{
436 struct psc_i2s *psc_i2s = cpu_dai->private_data; 92 struct psc_dma *psc_dma = cpu_dai->private_data;
437 dev_dbg(psc_i2s->dev, "psc_i2s_set_sysclk(cpu_dai=%p, dir=%i)\n", 93 dev_dbg(psc_dma->dev, "psc_i2s_set_sysclk(cpu_dai=%p, dir=%i)\n",
438 cpu_dai, dir); 94 cpu_dai, dir);
439 return (dir == SND_SOC_CLOCK_IN) ? 0 : -EINVAL; 95 return (dir == SND_SOC_CLOCK_IN) ? 0 : -EINVAL;
440} 96}
@@ -452,8 +108,8 @@ static int psc_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
452 */ 108 */
453static int psc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format) 109static int psc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format)
454{ 110{
455 struct psc_i2s *psc_i2s = cpu_dai->private_data; 111 struct psc_dma *psc_dma = cpu_dai->private_data;
456 dev_dbg(psc_i2s->dev, "psc_i2s_set_fmt(cpu_dai=%p, format=%i)\n", 112 dev_dbg(psc_dma->dev, "psc_i2s_set_fmt(cpu_dai=%p, format=%i)\n",
457 cpu_dai, format); 113 cpu_dai, format);
458 return (format == SND_SOC_DAIFMT_I2S) ? 0 : -EINVAL; 114 return (format == SND_SOC_DAIFMT_I2S) ? 0 : -EINVAL;
459} 115}
@@ -469,16 +125,13 @@ static int psc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format)
469 * psc_i2s_dai_template: template CPU Digital Audio Interface 125 * psc_i2s_dai_template: template CPU Digital Audio Interface
470 */ 126 */
471static struct snd_soc_dai_ops psc_i2s_dai_ops = { 127static struct snd_soc_dai_ops psc_i2s_dai_ops = {
472 .startup = psc_i2s_startup,
473 .hw_params = psc_i2s_hw_params, 128 .hw_params = psc_i2s_hw_params,
474 .hw_free = psc_i2s_hw_free,
475 .shutdown = psc_i2s_shutdown,
476 .trigger = psc_i2s_trigger,
477 .set_sysclk = psc_i2s_set_sysclk, 129 .set_sysclk = psc_i2s_set_sysclk,
478 .set_fmt = psc_i2s_set_fmt, 130 .set_fmt = psc_i2s_set_fmt,
479}; 131};
480 132
481static struct snd_soc_dai psc_i2s_dai_template = { 133struct snd_soc_dai psc_i2s_dai[] = {{
134 .name = "I2S",
482 .playback = { 135 .playback = {
483 .channels_min = 2, 136 .channels_min = 2,
484 .channels_max = 2, 137 .channels_max = 2,
@@ -492,223 +145,8 @@ static struct snd_soc_dai psc_i2s_dai_template = {
492 .formats = PSC_I2S_FORMATS, 145 .formats = PSC_I2S_FORMATS,
493 }, 146 },
494 .ops = &psc_i2s_dai_ops, 147 .ops = &psc_i2s_dai_ops,
495}; 148} };
496 149EXPORT_SYMBOL_GPL(psc_i2s_dai);
497/* ---------------------------------------------------------------------
498 * The PSC I2S 'ASoC platform' driver
499 *
500 * Can be referenced by an 'ASoC machine' driver
501 * This driver only deals with the audio bus; it doesn't have any
502 * interaction with the attached codec
503 */
504
505static const struct snd_pcm_hardware psc_i2s_pcm_hardware = {
506 .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
507 SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
508 SNDRV_PCM_INFO_BATCH,
509 .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |
510 SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE,
511 .rate_min = 8000,
512 .rate_max = 48000,
513 .channels_min = 2,
514 .channels_max = 2,
515 .period_bytes_max = 1024 * 1024,
516 .period_bytes_min = 32,
517 .periods_min = 2,
518 .periods_max = 256,
519 .buffer_bytes_max = 2 * 1024 * 1024,
520 .fifo_size = 0,
521};
522
523static int psc_i2s_pcm_open(struct snd_pcm_substream *substream)
524{
525 struct snd_soc_pcm_runtime *rtd = substream->private_data;
526 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
527 struct psc_i2s_stream *s;
528
529 dev_dbg(psc_i2s->dev, "psc_i2s_pcm_open(substream=%p)\n", substream);
530
531 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
532 s = &psc_i2s->capture;
533 else
534 s = &psc_i2s->playback;
535
536 snd_soc_set_runtime_hwparams(substream, &psc_i2s_pcm_hardware);
537
538 s->stream = substream;
539 return 0;
540}
541
542static int psc_i2s_pcm_close(struct snd_pcm_substream *substream)
543{
544 struct snd_soc_pcm_runtime *rtd = substream->private_data;
545 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
546 struct psc_i2s_stream *s;
547
548 dev_dbg(psc_i2s->dev, "psc_i2s_pcm_close(substream=%p)\n", substream);
549
550 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
551 s = &psc_i2s->capture;
552 else
553 s = &psc_i2s->playback;
554
555 s->stream = NULL;
556 return 0;
557}
558
559static snd_pcm_uframes_t
560psc_i2s_pcm_pointer(struct snd_pcm_substream *substream)
561{
562 struct snd_soc_pcm_runtime *rtd = substream->private_data;
563 struct psc_i2s *psc_i2s = rtd->dai->cpu_dai->private_data;
564 struct psc_i2s_stream *s;
565 dma_addr_t count;
566
567 if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE)
568 s = &psc_i2s->capture;
569 else
570 s = &psc_i2s->playback;
571
572 count = s->period_current_pt - s->period_start;
573
574 return bytes_to_frames(substream->runtime, count);
575}
576
577static struct snd_pcm_ops psc_i2s_pcm_ops = {
578 .open = psc_i2s_pcm_open,
579 .close = psc_i2s_pcm_close,
580 .ioctl = snd_pcm_lib_ioctl,
581 .pointer = psc_i2s_pcm_pointer,
582};
583
584static u64 psc_i2s_pcm_dmamask = 0xffffffff;
585static int psc_i2s_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
586 struct snd_pcm *pcm)
587{
588 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
589 size_t size = psc_i2s_pcm_hardware.buffer_bytes_max;
590 int rc = 0;
591
592 dev_dbg(rtd->socdev->dev, "psc_i2s_pcm_new(card=%p, dai=%p, pcm=%p)\n",
593 card, dai, pcm);
594
595 if (!card->dev->dma_mask)
596 card->dev->dma_mask = &psc_i2s_pcm_dmamask;
597 if (!card->dev->coherent_dma_mask)
598 card->dev->coherent_dma_mask = 0xffffffff;
599
600 if (pcm->streams[0].substream) {
601 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->dev, size,
602 &pcm->streams[0].substream->dma_buffer);
603 if (rc)
604 goto playback_alloc_err;
605 }
606
607 if (pcm->streams[1].substream) {
608 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->dev, size,
609 &pcm->streams[1].substream->dma_buffer);
610 if (rc)
611 goto capture_alloc_err;
612 }
613
614 return 0;
615
616 capture_alloc_err:
617 if (pcm->streams[0].substream)
618 snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
619 playback_alloc_err:
620 dev_err(card->dev, "Cannot allocate buffer(s)\n");
621 return -ENOMEM;
622}
623
624static void psc_i2s_pcm_free(struct snd_pcm *pcm)
625{
626 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
627 struct snd_pcm_substream *substream;
628 int stream;
629
630 dev_dbg(rtd->socdev->dev, "psc_i2s_pcm_free(pcm=%p)\n", pcm);
631
632 for (stream = 0; stream < 2; stream++) {
633 substream = pcm->streams[stream].substream;
634 if (substream) {
635 snd_dma_free_pages(&substream->dma_buffer);
636 substream->dma_buffer.area = NULL;
637 substream->dma_buffer.addr = 0;
638 }
639 }
640}
641
642struct snd_soc_platform psc_i2s_pcm_soc_platform = {
643 .name = "mpc5200-psc-audio",
644 .pcm_ops = &psc_i2s_pcm_ops,
645 .pcm_new = &psc_i2s_pcm_new,
646 .pcm_free = &psc_i2s_pcm_free,
647};
648
649/* ---------------------------------------------------------------------
650 * Sysfs attributes for debugging
651 */
652
653static ssize_t psc_i2s_status_show(struct device *dev,
654 struct device_attribute *attr, char *buf)
655{
656 struct psc_i2s *psc_i2s = dev_get_drvdata(dev);
657
658 return sprintf(buf, "status=%.4x sicr=%.8x rfnum=%i rfstat=0x%.4x "
659 "tfnum=%i tfstat=0x%.4x\n",
660 in_be16(&psc_i2s->psc_regs->sr_csr.status),
661 in_be32(&psc_i2s->psc_regs->sicr),
662 in_be16(&psc_i2s->fifo_regs->rfnum) & 0x1ff,
663 in_be16(&psc_i2s->fifo_regs->rfstat),
664 in_be16(&psc_i2s->fifo_regs->tfnum) & 0x1ff,
665 in_be16(&psc_i2s->fifo_regs->tfstat));
666}
667
668static int *psc_i2s_get_stat_attr(struct psc_i2s *psc_i2s, const char *name)
669{
670 if (strcmp(name, "playback_underrun") == 0)
671 return &psc_i2s->stats.underrun_count;
672 if (strcmp(name, "capture_overrun") == 0)
673 return &psc_i2s->stats.overrun_count;
674
675 return NULL;
676}
677
678static ssize_t psc_i2s_stat_show(struct device *dev,
679 struct device_attribute *attr, char *buf)
680{
681 struct psc_i2s *psc_i2s = dev_get_drvdata(dev);
682 int *attrib;
683
684 attrib = psc_i2s_get_stat_attr(psc_i2s, attr->attr.name);
685 if (!attrib)
686 return 0;
687
688 return sprintf(buf, "%i\n", *attrib);
689}
690
691static ssize_t psc_i2s_stat_store(struct device *dev,
692 struct device_attribute *attr,
693 const char *buf,
694 size_t count)
695{
696 struct psc_i2s *psc_i2s = dev_get_drvdata(dev);
697 int *attrib;
698
699 attrib = psc_i2s_get_stat_attr(psc_i2s, attr->attr.name);
700 if (!attrib)
701 return 0;
702
703 *attrib = simple_strtoul(buf, NULL, 0);
704 return count;
705}
706
707static DEVICE_ATTR(status, 0644, psc_i2s_status_show, NULL);
708static DEVICE_ATTR(playback_underrun, 0644, psc_i2s_stat_show,
709 psc_i2s_stat_store);
710static DEVICE_ATTR(capture_overrun, 0644, psc_i2s_stat_show,
711 psc_i2s_stat_store);
712 150
713/* --------------------------------------------------------------------- 151/* ---------------------------------------------------------------------
714 * OF platform bus binding code: 152 * OF platform bus binding code:
@@ -718,150 +156,65 @@ static DEVICE_ATTR(capture_overrun, 0644, psc_i2s_stat_show,
718static int __devinit psc_i2s_of_probe(struct of_device *op, 156static int __devinit psc_i2s_of_probe(struct of_device *op,
719 const struct of_device_id *match) 157 const struct of_device_id *match)
720{ 158{
721 phys_addr_t fifo; 159 int rc;
722 struct psc_i2s *psc_i2s; 160 struct psc_dma *psc_dma;
723 struct resource res; 161 struct mpc52xx_psc __iomem *regs;
724 int size, psc_id, irq, rc;
725 const __be32 *prop;
726 void __iomem *regs;
727
728 dev_dbg(&op->dev, "probing psc i2s device\n");
729
730 /* Get the PSC ID */
731 prop = of_get_property(op->node, "cell-index", &size);
732 if (!prop || size < sizeof *prop)
733 return -ENODEV;
734 psc_id = be32_to_cpu(*prop);
735
736 /* Fetch the registers and IRQ of the PSC */
737 irq = irq_of_parse_and_map(op->node, 0);
738 if (of_address_to_resource(op->node, 0, &res)) {
739 dev_err(&op->dev, "Missing reg property\n");
740 return -ENODEV;
741 }
742 regs = ioremap(res.start, 1 + res.end - res.start);
743 if (!regs) {
744 dev_err(&op->dev, "Could not map registers\n");
745 return -ENODEV;
746 }
747 162
748 /* Allocate and initialize the driver private data */ 163 rc = mpc5200_audio_dma_create(op);
749 psc_i2s = kzalloc(sizeof *psc_i2s, GFP_KERNEL); 164 if (rc != 0)
750 if (!psc_i2s) { 165 return rc;
751 iounmap(regs); 166
752 return -ENOMEM; 167 rc = snd_soc_register_dais(psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai));
753 } 168 if (rc != 0) {
754 spin_lock_init(&psc_i2s->lock); 169 pr_err("Failed to register DAI\n");
755 psc_i2s->irq = irq; 170 return 0;
756 psc_i2s->psc_regs = regs;
757 psc_i2s->fifo_regs = regs + sizeof *psc_i2s->psc_regs;
758 psc_i2s->dev = &op->dev;
759 psc_i2s->playback.psc_i2s = psc_i2s;
760 psc_i2s->capture.psc_i2s = psc_i2s;
761 snprintf(psc_i2s->name, sizeof psc_i2s->name, "PSC%u", psc_id+1);
762
763 /* Fill out the CPU DAI structure */
764 memcpy(&psc_i2s->dai, &psc_i2s_dai_template, sizeof psc_i2s->dai);
765 psc_i2s->dai.private_data = psc_i2s;
766 psc_i2s->dai.name = psc_i2s->name;
767 psc_i2s->dai.id = psc_id;
768
769 /* Find the address of the fifo data registers and setup the
770 * DMA tasks */
771 fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32);
772 psc_i2s->capture.bcom_task =
773 bcom_psc_gen_bd_rx_init(psc_id, 10, fifo, 512);
774 psc_i2s->playback.bcom_task =
775 bcom_psc_gen_bd_tx_init(psc_id, 10, fifo);
776 if (!psc_i2s->capture.bcom_task ||
777 !psc_i2s->playback.bcom_task) {
778 dev_err(&op->dev, "Could not allocate bestcomm tasks\n");
779 iounmap(regs);
780 kfree(psc_i2s);
781 return -ENODEV;
782 } 171 }
783 172
784 /* Disable all interrupts and reset the PSC */ 173 psc_dma = dev_get_drvdata(&op->dev);
785 out_be16(&psc_i2s->psc_regs->isr_imr.imr, 0); 174 regs = psc_dma->psc_regs;
786 out_8(&psc_i2s->psc_regs->command, 3 << 4); /* reset transmitter */
787 out_8(&psc_i2s->psc_regs->command, 2 << 4); /* reset receiver */
788 out_8(&psc_i2s->psc_regs->command, 1 << 4); /* reset mode */
789 out_8(&psc_i2s->psc_regs->command, 4 << 4); /* reset error */
790 175
791 /* Configure the serial interface mode; defaulting to CODEC8 mode */ 176 /* Configure the serial interface mode; defaulting to CODEC8 mode */
792 psc_i2s->sicr = MPC52xx_PSC_SICR_DTS1 | MPC52xx_PSC_SICR_I2S | 177 psc_dma->sicr = MPC52xx_PSC_SICR_DTS1 | MPC52xx_PSC_SICR_I2S |
793 MPC52xx_PSC_SICR_CLKPOL; 178 MPC52xx_PSC_SICR_CLKPOL;
794 if (of_get_property(op->node, "fsl,cellslave", NULL)) 179 out_be32(&psc_dma->psc_regs->sicr,
795 psc_i2s->sicr |= MPC52xx_PSC_SICR_CELLSLAVE | 180 psc_dma->sicr | MPC52xx_PSC_SICR_SIM_CODEC_8);
796 MPC52xx_PSC_SICR_GENCLK;
797 out_be32(&psc_i2s->psc_regs->sicr,
798 psc_i2s->sicr | MPC52xx_PSC_SICR_SIM_CODEC_8);
799 181
800 /* Check for the codec handle. If it is not present then we 182 /* Check for the codec handle. If it is not present then we
801 * are done */ 183 * are done */
802 if (!of_get_property(op->node, "codec-handle", NULL)) 184 if (!of_get_property(op->node, "codec-handle", NULL))
803 return 0; 185 return 0;
804 186
805 /* Set up mode register; 187 /* Due to errata in the dma mode; need to line up enabling
806 * First write: RxRdy (FIFO Alarm) generates rx FIFO irq 188 * the transmitter with a transition on the frame sync
807 * Second write: register Normal mode for non loopback 189 * line */
808 */ 190
809 out_8(&psc_i2s->psc_regs->mode, 0); 191 /* first make sure it is low */
810 out_8(&psc_i2s->psc_regs->mode, 0); 192 while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) != 0)
811 193 ;
812 /* Set the TX and RX fifo alarm thresholds */ 194 /* then wait for the transition to high */
813 out_be16(&psc_i2s->fifo_regs->rfalarm, 0x100); 195 while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) == 0)
814 out_8(&psc_i2s->fifo_regs->rfcntl, 0x4); 196 ;
815 out_be16(&psc_i2s->fifo_regs->tfalarm, 0x100); 197 /* Finally, enable the PSC.
816 out_8(&psc_i2s->fifo_regs->tfcntl, 0x7); 198 * Receiver must always be enabled; even when we only want
817 199 * transmit. (see 15.3.2.3 of MPC5200B User's Guide) */
818 /* Lookup the IRQ numbers */ 200
819 psc_i2s->playback.irq = 201 /* Go */
820 bcom_get_task_irq(psc_i2s->playback.bcom_task); 202 out_8(&psc_dma->psc_regs->command,
821 psc_i2s->capture.irq = 203 MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
822 bcom_get_task_irq(psc_i2s->capture.bcom_task);
823
824 /* Save what we've done so it can be found again later */
825 dev_set_drvdata(&op->dev, psc_i2s);
826
827 /* Register the SYSFS files */
828 rc = device_create_file(psc_i2s->dev, &dev_attr_status);
829 rc |= device_create_file(psc_i2s->dev, &dev_attr_capture_overrun);
830 rc |= device_create_file(psc_i2s->dev, &dev_attr_playback_underrun);
831 if (rc)
832 dev_info(psc_i2s->dev, "error creating sysfs files\n");
833
834 snd_soc_register_platform(&psc_i2s_pcm_soc_platform);
835
836 /* Tell the ASoC OF helpers about it */
837 of_snd_soc_register_platform(&psc_i2s_pcm_soc_platform, op->node,
838 &psc_i2s->dai);
839 204
840 return 0; 205 return 0;
206
841} 207}
842 208
843static int __devexit psc_i2s_of_remove(struct of_device *op) 209static int __devexit psc_i2s_of_remove(struct of_device *op)
844{ 210{
845 struct psc_i2s *psc_i2s = dev_get_drvdata(&op->dev); 211 return mpc5200_audio_dma_destroy(op);
846
847 dev_dbg(&op->dev, "psc_i2s_remove()\n");
848
849 snd_soc_unregister_platform(&psc_i2s_pcm_soc_platform);
850
851 bcom_gen_bd_rx_release(psc_i2s->capture.bcom_task);
852 bcom_gen_bd_tx_release(psc_i2s->playback.bcom_task);
853
854 iounmap(psc_i2s->psc_regs);
855 iounmap(psc_i2s->fifo_regs);
856 kfree(psc_i2s);
857 dev_set_drvdata(&op->dev, NULL);
858
859 return 0;
860} 212}
861 213
862/* Match table for of_platform binding */ 214/* Match table for of_platform binding */
863static struct of_device_id psc_i2s_match[] __devinitdata = { 215static struct of_device_id psc_i2s_match[] __devinitdata = {
864 { .compatible = "fsl,mpc5200-psc-i2s", }, 216 { .compatible = "fsl,mpc5200-psc-i2s", },
217 { .compatible = "fsl,mpc5200b-psc-i2s", },
865 {} 218 {}
866}; 219};
867MODULE_DEVICE_TABLE(of, psc_i2s_match); 220MODULE_DEVICE_TABLE(of, psc_i2s_match);
@@ -892,4 +245,7 @@ static void __exit psc_i2s_exit(void)
892} 245}
893module_exit(psc_i2s_exit); 246module_exit(psc_i2s_exit);
894 247
248MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
249MODULE_DESCRIPTION("Freescale MPC5200 PSC in I2S mode ASoC Driver");
250MODULE_LICENSE("GPL");
895 251
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.h b/sound/soc/fsl/mpc5200_psc_i2s.h
new file mode 100644
index 000000000000..ce55e070fdf3
--- /dev/null
+++ b/sound/soc/fsl/mpc5200_psc_i2s.h
@@ -0,0 +1,12 @@
1/*
2 * Freescale MPC5200 PSC in I2S mode
3 * ALSA SoC Digital Audio Interface (DAI) driver
4 *
5 */
6
7#ifndef __SOUND_SOC_FSL_MPC52xx_PSC_I2S_H__
8#define __SOUND_SOC_FSL_MPC52xx_PSC_I2S_H__
9
10extern struct snd_soc_dai psc_i2s_dai[];
11
12#endif /* __SOUND_SOC_FSL_MPC52xx_PSC_I2S_H__ */
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
new file mode 100644
index 000000000000..8766f7a3893d
--- /dev/null
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -0,0 +1,90 @@
1/*
2 * Phytec pcm030 driver for the PSC of the Freescale MPC52xx
3 * configured as AC97 interface
4 *
5 * Copyright 2008 Jon Smirl, Digispeaker
6 * Author: Jon Smirl <jonsmirl@gmail.com>
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/device.h>
17#include <linux/delay.h>
18#include <linux/of_device.h>
19#include <linux/of_platform.h>
20#include <linux/dma-mapping.h>
21
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/initval.h>
26#include <sound/soc.h>
27#include <sound/soc-of-simple.h>
28
29#include "mpc5200_dma.h"
30#include "mpc5200_psc_ac97.h"
31#include "../codecs/wm9712.h"
32
33static struct snd_soc_device device;
34static struct snd_soc_card card;
35
36static struct snd_soc_dai_link pcm030_fabric_dai[] = {
37{
38 .name = "AC97",
39 .stream_name = "AC97 Analog",
40 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_HIFI],
41 .cpu_dai = &psc_ac97_dai[MPC5200_AC97_NORMAL],
42},
43{
44 .name = "AC97",
45 .stream_name = "AC97 IEC958",
46 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_AUX],
47 .cpu_dai = &psc_ac97_dai[MPC5200_AC97_SPDIF],
48},
49};
50
51static __init int pcm030_fabric_init(void)
52{
53 struct platform_device *pdev;
54 int rc;
55
56 if (!machine_is_compatible("phytec,pcm030"))
57 return -ENODEV;
58
59 card.platform = &mpc5200_audio_dma_platform;
60 card.name = "pcm030";
61 card.dai_link = pcm030_fabric_dai;
62 card.num_links = ARRAY_SIZE(pcm030_fabric_dai);
63
64 device.card = &card;
65 device.codec_dev = &soc_codec_dev_wm9712;
66
67 pdev = platform_device_alloc("soc-audio", 1);
68 if (!pdev) {
69 pr_err("pcm030_fabric_init: platform_device_alloc() failed\n");
70 return -ENODEV;
71 }
72
73 platform_set_drvdata(pdev, &device);
74 device.dev = &pdev->dev;
75
76 rc = platform_device_add(pdev);
77 if (rc) {
78 pr_err("pcm030_fabric_init: platform_device_add() failed\n");
79 return -ENODEV;
80 }
81 return 0;
82}
83
84module_init(pcm030_fabric_init);
85
86
87MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
88MODULE_DESCRIPTION(DRV_NAME ": mpc5200 pcm030 fabric driver");
89MODULE_LICENSE("GPL");
90
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 675732e724d5..b771238662b6 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -39,6 +39,14 @@ config SND_OMAP_SOC_OMAP2EVM
39 help 39 help
40 Say Y if you want to add support for SoC audio on the omap2evm board. 40 Say Y if you want to add support for SoC audio on the omap2evm board.
41 41
42config SND_OMAP_SOC_OMAP3EVM
43 tristate "SoC Audio support for OMAP3EVM board"
44 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM
45 select SND_OMAP_SOC_MCBSP
46 select SND_SOC_TWL4030
47 help
48 Say Y if you want to add support for SoC audio on the omap3evm board.
49
42config SND_OMAP_SOC_SDP3430 50config SND_OMAP_SOC_SDP3430
43 tristate "SoC Audio support for Texas Instruments SDP3430" 51 tristate "SoC Audio support for Texas Instruments SDP3430"
44 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_3430SDP 52 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_3430SDP
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 0c9e4ac37660..a37f49862389 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -10,6 +10,7 @@ snd-soc-n810-objs := n810.o
10snd-soc-osk5912-objs := osk5912.o 10snd-soc-osk5912-objs := osk5912.o
11snd-soc-overo-objs := overo.o 11snd-soc-overo-objs := overo.o
12snd-soc-omap2evm-objs := omap2evm.o 12snd-soc-omap2evm-objs := omap2evm.o
13snd-soc-omap3evm-objs := omap3evm.o
13snd-soc-sdp3430-objs := sdp3430.o 14snd-soc-sdp3430-objs := sdp3430.o
14snd-soc-omap3pandora-objs := omap3pandora.o 15snd-soc-omap3pandora-objs := omap3pandora.o
15snd-soc-omap3beagle-objs := omap3beagle.o 16snd-soc-omap3beagle-objs := omap3beagle.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
18obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o 19obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
19obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o 20obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
20obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o 21obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o
22obj-$(CONFIG_MACH_OMAP3EVM) += snd-soc-omap3evm.o
21obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o 23obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o
22obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o 24obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
23obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o 25obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 91ef17992de5..b60b1dfbc435 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -383,10 +383,9 @@ static int __init n810_soc_init(void)
383 clk_set_parent(sys_clkout2_src, func96m_clk); 383 clk_set_parent(sys_clkout2_src, func96m_clk);
384 clk_set_rate(sys_clkout2, 12000000); 384 clk_set_rate(sys_clkout2, 12000000);
385 385
386 if (gpio_request(N810_HEADSET_AMP_GPIO, "hs_amp") < 0) 386 BUG_ON((gpio_request(N810_HEADSET_AMP_GPIO, "hs_amp") < 0) ||
387 BUG(); 387 (gpio_request(N810_SPEAKER_AMP_GPIO, "spk_amp") < 0));
388 if (gpio_request(N810_SPEAKER_AMP_GPIO, "spk_amp") < 0) 388
389 BUG();
390 gpio_direction_output(N810_HEADSET_AMP_GPIO, 0); 389 gpio_direction_output(N810_HEADSET_AMP_GPIO, 0);
391 gpio_direction_output(N810_SPEAKER_AMP_GPIO, 0); 390 gpio_direction_output(N810_SPEAKER_AMP_GPIO, 0);
392 391
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 912614283848..a5d46a7b196a 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -215,8 +215,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
215 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); 215 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
216 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; 216 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
217 int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id; 217 int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
218 int wlen, channels; 218 int wlen, channels, wpf;
219 unsigned long port; 219 unsigned long port;
220 unsigned int format;
220 221
221 if (cpu_class_is_omap1()) { 222 if (cpu_class_is_omap1()) {
222 dma = omap1_dma_reqs[bus_id][substream->stream]; 223 dma = omap1_dma_reqs[bus_id][substream->stream];
@@ -244,18 +245,24 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
244 return 0; 245 return 0;
245 } 246 }
246 247
247 channels = params_channels(params); 248 format = mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
249 wpf = channels = params_channels(params);
248 switch (channels) { 250 switch (channels) {
249 case 2: 251 case 2:
250 /* Use dual-phase frames */ 252 if (format == SND_SOC_DAIFMT_I2S) {
251 regs->rcr2 |= RPHASE; 253 /* Use dual-phase frames */
252 regs->xcr2 |= XPHASE; 254 regs->rcr2 |= RPHASE;
255 regs->xcr2 |= XPHASE;
256 /* Set 1 word per (McBSP) frame for phase1 and phase2 */
257 wpf--;
258 regs->rcr2 |= RFRLEN2(wpf - 1);
259 regs->xcr2 |= XFRLEN2(wpf - 1);
260 }
253 case 1: 261 case 1:
254 /* Set 1 word per (McBSP) frame */ 262 case 4:
255 regs->rcr2 |= RFRLEN2(1 - 1); 263 /* Set word per (McBSP) frame for phase1 */
256 regs->rcr1 |= RFRLEN1(1 - 1); 264 regs->rcr1 |= RFRLEN1(wpf - 1);
257 regs->xcr2 |= XFRLEN2(1 - 1); 265 regs->xcr1 |= XFRLEN1(wpf - 1);
258 regs->xcr1 |= XFRLEN1(1 - 1);
259 break; 266 break;
260 default: 267 default:
261 /* Unsupported number of channels */ 268 /* Unsupported number of channels */
@@ -277,11 +284,12 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
277 } 284 }
278 285
279 /* Set FS period and length in terms of bit clock periods */ 286 /* Set FS period and length in terms of bit clock periods */
280 switch (mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 287 switch (format) {
281 case SND_SOC_DAIFMT_I2S: 288 case SND_SOC_DAIFMT_I2S:
282 regs->srgr2 |= FPER(wlen * 2 - 1); 289 regs->srgr2 |= FPER(wlen * channels - 1);
283 regs->srgr1 |= FWID(wlen - 1); 290 regs->srgr1 |= FWID(wlen - 1);
284 break; 291 break;
292 case SND_SOC_DAIFMT_DSP_A:
285 case SND_SOC_DAIFMT_DSP_B: 293 case SND_SOC_DAIFMT_DSP_B:
286 regs->srgr2 |= FPER(wlen * channels - 1); 294 regs->srgr2 |= FPER(wlen * channels - 1);
287 regs->srgr1 |= FWID(0); 295 regs->srgr1 |= FWID(0);
@@ -326,6 +334,13 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
326 regs->rcr2 |= RDATDLY(1); 334 regs->rcr2 |= RDATDLY(1);
327 regs->xcr2 |= XDATDLY(1); 335 regs->xcr2 |= XDATDLY(1);
328 break; 336 break;
337 case SND_SOC_DAIFMT_DSP_A:
338 /* 1-bit data delay */
339 regs->rcr2 |= RDATDLY(1);
340 regs->xcr2 |= XDATDLY(1);
341 /* Invert FS polarity configuration */
342 temp_fmt ^= SND_SOC_DAIFMT_NB_IF;
343 break;
329 case SND_SOC_DAIFMT_DSP_B: 344 case SND_SOC_DAIFMT_DSP_B:
330 /* 0-bit data delay */ 345 /* 0-bit data delay */
331 regs->rcr2 |= RDATDLY(0); 346 regs->rcr2 |= RDATDLY(0);
@@ -492,13 +507,13 @@ static struct snd_soc_dai_ops omap_mcbsp_dai_ops = {
492 .id = (link_id), \ 507 .id = (link_id), \
493 .playback = { \ 508 .playback = { \
494 .channels_min = 1, \ 509 .channels_min = 1, \
495 .channels_max = 2, \ 510 .channels_max = 4, \
496 .rates = OMAP_MCBSP_RATES, \ 511 .rates = OMAP_MCBSP_RATES, \
497 .formats = SNDRV_PCM_FMTBIT_S16_LE, \ 512 .formats = SNDRV_PCM_FMTBIT_S16_LE, \
498 }, \ 513 }, \
499 .capture = { \ 514 .capture = { \
500 .channels_min = 1, \ 515 .channels_min = 1, \
501 .channels_max = 2, \ 516 .channels_max = 4, \
502 .rates = OMAP_MCBSP_RATES, \ 517 .rates = OMAP_MCBSP_RATES, \
503 .formats = SNDRV_PCM_FMTBIT_S16_LE, \ 518 .formats = SNDRV_PCM_FMTBIT_S16_LE, \
504 }, \ 519 }, \
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 07cf7f46b584..6454e15f7d28 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -87,8 +87,10 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
87 struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data; 87 struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data;
88 int err = 0; 88 int err = 0;
89 89
90 /* return if this is a bufferless transfer e.g.
91 * codec <--> BT codec or GSM modem -- lg FIXME */
90 if (!dma_data) 92 if (!dma_data)
91 return -ENODEV; 93 return 0;
92 94
93 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); 95 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
94 runtime->dma_bytes = params_buffer_bytes(params); 96 runtime->dma_bytes = params_buffer_bytes(params);
@@ -134,6 +136,11 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream)
134 struct omap_pcm_dma_data *dma_data = prtd->dma_data; 136 struct omap_pcm_dma_data *dma_data = prtd->dma_data;
135 struct omap_dma_channel_params dma_params; 137 struct omap_dma_channel_params dma_params;
136 138
139 /* return if this is a bufferless transfer e.g.
140 * codec <--> BT codec or GSM modem -- lg FIXME */
141 if (!prtd->dma_data)
142 return 0;
143
137 memset(&dma_params, 0, sizeof(dma_params)); 144 memset(&dma_params, 0, sizeof(dma_params));
138 /* 145 /*
139 * Note: Regardless of interface data formats supported by OMAP McBSP 146 * Note: Regardless of interface data formats supported by OMAP McBSP
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
index 0c2322dcf02a..027e1a40f8a1 100644
--- a/sound/soc/omap/omap2evm.c
+++ b/sound/soc/omap/omap2evm.c
@@ -86,7 +86,7 @@ static struct snd_soc_dai_link omap2evm_dai = {
86 .name = "TWL4030", 86 .name = "TWL4030",
87 .stream_name = "TWL4030", 87 .stream_name = "TWL4030",
88 .cpu_dai = &omap_mcbsp_dai[0], 88 .cpu_dai = &omap_mcbsp_dai[0],
89 .codec_dai = &twl4030_dai, 89 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
90 .ops = &omap2evm_ops, 90 .ops = &omap2evm_ops,
91}; 91};
92 92
diff --git a/sound/soc/omap/omap3beagle.c b/sound/soc/omap/omap3beagle.c
index fd24a4acd2f5..b0cff9f33b7e 100644
--- a/sound/soc/omap/omap3beagle.c
+++ b/sound/soc/omap/omap3beagle.c
@@ -41,23 +41,33 @@ static int omap3beagle_hw_params(struct snd_pcm_substream *substream,
41 struct snd_soc_pcm_runtime *rtd = substream->private_data; 41 struct snd_soc_pcm_runtime *rtd = substream->private_data;
42 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; 42 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
43 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 43 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
44 unsigned int fmt;
44 int ret; 45 int ret;
45 46
47 switch (params_channels(params)) {
48 case 2: /* Stereo I2S mode */
49 fmt = SND_SOC_DAIFMT_I2S |
50 SND_SOC_DAIFMT_NB_NF |
51 SND_SOC_DAIFMT_CBM_CFM;
52 break;
53 case 4: /* Four channel TDM mode */
54 fmt = SND_SOC_DAIFMT_DSP_A |
55 SND_SOC_DAIFMT_IB_NF |
56 SND_SOC_DAIFMT_CBM_CFM;
57 break;
58 default:
59 return -EINVAL;
60 }
61
46 /* Set codec DAI configuration */ 62 /* Set codec DAI configuration */
47 ret = snd_soc_dai_set_fmt(codec_dai, 63 ret = snd_soc_dai_set_fmt(codec_dai, fmt);
48 SND_SOC_DAIFMT_I2S |
49 SND_SOC_DAIFMT_NB_NF |
50 SND_SOC_DAIFMT_CBM_CFM);
51 if (ret < 0) { 64 if (ret < 0) {
52 printk(KERN_ERR "can't set codec DAI configuration\n"); 65 printk(KERN_ERR "can't set codec DAI configuration\n");
53 return ret; 66 return ret;
54 } 67 }
55 68
56 /* Set cpu DAI configuration */ 69 /* Set cpu DAI configuration */
57 ret = snd_soc_dai_set_fmt(cpu_dai, 70 ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
58 SND_SOC_DAIFMT_I2S |
59 SND_SOC_DAIFMT_NB_NF |
60 SND_SOC_DAIFMT_CBM_CFM);
61 if (ret < 0) { 71 if (ret < 0) {
62 printk(KERN_ERR "can't set cpu DAI configuration\n"); 72 printk(KERN_ERR "can't set cpu DAI configuration\n");
63 return ret; 73 return ret;
@@ -83,7 +93,7 @@ static struct snd_soc_dai_link omap3beagle_dai = {
83 .name = "TWL4030", 93 .name = "TWL4030",
84 .stream_name = "TWL4030", 94 .stream_name = "TWL4030",
85 .cpu_dai = &omap_mcbsp_dai[0], 95 .cpu_dai = &omap_mcbsp_dai[0],
86 .codec_dai = &twl4030_dai, 96 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
87 .ops = &omap3beagle_ops, 97 .ops = &omap3beagle_ops,
88}; 98};
89 99
diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c
new file mode 100644
index 000000000000..9114c263077b
--- /dev/null
+++ b/sound/soc/omap/omap3evm.c
@@ -0,0 +1,147 @@
1/*
2 * omap3evm.c -- ALSA SoC support for OMAP3 EVM
3 *
4 * Author: Anuj Aggarwal <anuj.aggarwal@ti.com>
5 *
6 * Based on sound/soc/omap/beagle.c by Steve Sakoman
7 *
8 * Copyright (C) 2008 Texas Instruments, Incorporated
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation version 2.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
15 * whether express or implied; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20#include <linux/clk.h>
21#include <linux/platform_device.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26
27#include <asm/mach-types.h>
28#include <mach/hardware.h>
29#include <mach/gpio.h>
30#include <mach/mcbsp.h>
31
32#include "omap-mcbsp.h"
33#include "omap-pcm.h"
34#include "../codecs/twl4030.h"
35
36static int omap3evm_hw_params(struct snd_pcm_substream *substream,
37 struct snd_pcm_hw_params *params)
38{
39 struct snd_soc_pcm_runtime *rtd = substream->private_data;
40 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
41 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
42 int ret;
43
44 /* Set codec DAI configuration */
45 ret = snd_soc_dai_set_fmt(codec_dai,
46 SND_SOC_DAIFMT_I2S |
47 SND_SOC_DAIFMT_NB_NF |
48 SND_SOC_DAIFMT_CBM_CFM);
49 if (ret < 0) {
50 printk(KERN_ERR "Can't set codec DAI configuration\n");
51 return ret;
52 }
53
54 /* Set cpu DAI configuration */
55 ret = snd_soc_dai_set_fmt(cpu_dai,
56 SND_SOC_DAIFMT_I2S |
57 SND_SOC_DAIFMT_NB_NF |
58 SND_SOC_DAIFMT_CBM_CFM);
59 if (ret < 0) {
60 printk(KERN_ERR "Can't set cpu DAI configuration\n");
61 return ret;
62 }
63
64 /* Set the codec system clock for DAC and ADC */
65 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
66 SND_SOC_CLOCK_IN);
67 if (ret < 0) {
68 printk(KERN_ERR "Can't set codec system clock\n");
69 return ret;
70 }
71
72 return 0;
73}
74
75static struct snd_soc_ops omap3evm_ops = {
76 .hw_params = omap3evm_hw_params,
77};
78
79/* Digital audio interface glue - connects codec <--> CPU */
80static struct snd_soc_dai_link omap3evm_dai = {
81 .name = "TWL4030",
82 .stream_name = "TWL4030",
83 .cpu_dai = &omap_mcbsp_dai[0],
84 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
85 .ops = &omap3evm_ops,
86};
87
88/* Audio machine driver */
89static struct snd_soc_card snd_soc_omap3evm = {
90 .name = "omap3evm",
91 .platform = &omap_soc_platform,
92 .dai_link = &omap3evm_dai,
93 .num_links = 1,
94};
95
96/* Audio subsystem */
97static struct snd_soc_device omap3evm_snd_devdata = {
98 .card = &snd_soc_omap3evm,
99 .codec_dev = &soc_codec_dev_twl4030,
100};
101
102static struct platform_device *omap3evm_snd_device;
103
104static int __init omap3evm_soc_init(void)
105{
106 int ret;
107
108 if (!machine_is_omap3evm()) {
109 pr_err("Not OMAP3 EVM!\n");
110 return -ENODEV;
111 }
112 pr_info("OMAP3 EVM SoC init\n");
113
114 omap3evm_snd_device = platform_device_alloc("soc-audio", -1);
115 if (!omap3evm_snd_device) {
116 printk(KERN_ERR "Platform device allocation failed\n");
117 return -ENOMEM;
118 }
119
120 platform_set_drvdata(omap3evm_snd_device, &omap3evm_snd_devdata);
121 omap3evm_snd_devdata.dev = &omap3evm_snd_device->dev;
122 *(unsigned int *)omap3evm_dai.cpu_dai->private_data = 1;
123
124 ret = platform_device_add(omap3evm_snd_device);
125 if (ret)
126 goto err1;
127
128 return 0;
129
130err1:
131 printk(KERN_ERR "Unable to add platform device\n");
132 platform_device_put(omap3evm_snd_device);
133
134 return ret;
135}
136
137static void __exit omap3evm_soc_exit(void)
138{
139 platform_device_unregister(omap3evm_snd_device);
140}
141
142module_init(omap3evm_soc_init);
143module_exit(omap3evm_soc_exit);
144
145MODULE_AUTHOR("Anuj Aggarwal <anuj.aggarwal@ti.com>");
146MODULE_DESCRIPTION("ALSA SoC OMAP3 EVM");
147MODULE_LICENSE("GPLv2");
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index fe282d4ef422..ad219aaf7cb8 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -228,14 +228,14 @@ static struct snd_soc_dai_link omap3pandora_dai[] = {
228 .name = "PCM1773", 228 .name = "PCM1773",
229 .stream_name = "HiFi Out", 229 .stream_name = "HiFi Out",
230 .cpu_dai = &omap_mcbsp_dai[0], 230 .cpu_dai = &omap_mcbsp_dai[0],
231 .codec_dai = &twl4030_dai, 231 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
232 .ops = &omap3pandora_out_ops, 232 .ops = &omap3pandora_out_ops,
233 .init = omap3pandora_out_init, 233 .init = omap3pandora_out_init,
234 }, { 234 }, {
235 .name = "TWL4030", 235 .name = "TWL4030",
236 .stream_name = "Line/Mic In", 236 .stream_name = "Line/Mic In",
237 .cpu_dai = &omap_mcbsp_dai[1], 237 .cpu_dai = &omap_mcbsp_dai[1],
238 .codec_dai = &twl4030_dai, 238 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
239 .ops = &omap3pandora_in_ops, 239 .ops = &omap3pandora_in_ops,
240 .init = omap3pandora_in_init, 240 .init = omap3pandora_in_init,
241 } 241 }
diff --git a/sound/soc/omap/overo.c b/sound/soc/omap/overo.c
index a72dc4e159e5..ec4f8fd8b3a2 100644
--- a/sound/soc/omap/overo.c
+++ b/sound/soc/omap/overo.c
@@ -83,7 +83,7 @@ static struct snd_soc_dai_link overo_dai = {
83 .name = "TWL4030", 83 .name = "TWL4030",
84 .stream_name = "TWL4030", 84 .stream_name = "TWL4030",
85 .cpu_dai = &omap_mcbsp_dai[0], 85 .cpu_dai = &omap_mcbsp_dai[0],
86 .codec_dai = &twl4030_dai, 86 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
87 .ops = &overo_ops, 87 .ops = &overo_ops,
88}; 88};
89 89
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index 10f1c867f11d..b719e5db4f57 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -84,6 +84,49 @@ static struct snd_soc_ops sdp3430_ops = {
84 .hw_params = sdp3430_hw_params, 84 .hw_params = sdp3430_hw_params,
85}; 85};
86 86
87static int sdp3430_hw_voice_params(struct snd_pcm_substream *substream,
88 struct snd_pcm_hw_params *params)
89{
90 struct snd_soc_pcm_runtime *rtd = substream->private_data;
91 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
92 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
93 int ret;
94
95 /* Set codec DAI configuration */
96 ret = snd_soc_dai_set_fmt(codec_dai,
97 SND_SOC_DAIFMT_DSP_A |
98 SND_SOC_DAIFMT_IB_NF |
99 SND_SOC_DAIFMT_CBS_CFM);
100 if (ret) {
101 printk(KERN_ERR "can't set codec DAI configuration\n");
102 return ret;
103 }
104
105 /* Set cpu DAI configuration */
106 ret = snd_soc_dai_set_fmt(cpu_dai,
107 SND_SOC_DAIFMT_DSP_A |
108 SND_SOC_DAIFMT_IB_NF |
109 SND_SOC_DAIFMT_CBM_CFM);
110 if (ret < 0) {
111 printk(KERN_ERR "can't set cpu DAI configuration\n");
112 return ret;
113 }
114
115 /* Set the codec system clock for DAC and ADC */
116 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
117 SND_SOC_CLOCK_IN);
118 if (ret < 0) {
119 printk(KERN_ERR "can't set codec system clock\n");
120 return ret;
121 }
122
123 return 0;
124}
125
126static struct snd_soc_ops sdp3430_voice_ops = {
127 .hw_params = sdp3430_hw_voice_params,
128};
129
87/* Headset jack */ 130/* Headset jack */
88static struct snd_soc_jack hs_jack; 131static struct snd_soc_jack hs_jack;
89 132
@@ -192,28 +235,58 @@ static int sdp3430_twl4030_init(struct snd_soc_codec *codec)
192 return ret; 235 return ret;
193} 236}
194 237
238static int sdp3430_twl4030_voice_init(struct snd_soc_codec *codec)
239{
240 unsigned short reg;
241
242 /* Enable voice interface */
243 reg = codec->read(codec, TWL4030_REG_VOICE_IF);
244 reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
245 codec->write(codec, TWL4030_REG_VOICE_IF, reg);
246
247 return 0;
248}
249
250
195/* Digital audio interface glue - connects codec <--> CPU */ 251/* Digital audio interface glue - connects codec <--> CPU */
196static struct snd_soc_dai_link sdp3430_dai = { 252static struct snd_soc_dai_link sdp3430_dai[] = {
197 .name = "TWL4030", 253 {
198 .stream_name = "TWL4030", 254 .name = "TWL4030 I2S",
199 .cpu_dai = &omap_mcbsp_dai[0], 255 .stream_name = "TWL4030 Audio",
200 .codec_dai = &twl4030_dai, 256 .cpu_dai = &omap_mcbsp_dai[0],
201 .init = sdp3430_twl4030_init, 257 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
202 .ops = &sdp3430_ops, 258 .init = sdp3430_twl4030_init,
259 .ops = &sdp3430_ops,
260 },
261 {
262 .name = "TWL4030 PCM",
263 .stream_name = "TWL4030 Voice",
264 .cpu_dai = &omap_mcbsp_dai[1],
265 .codec_dai = &twl4030_dai[TWL4030_DAI_VOICE],
266 .init = sdp3430_twl4030_voice_init,
267 .ops = &sdp3430_voice_ops,
268 },
203}; 269};
204 270
205/* Audio machine driver */ 271/* Audio machine driver */
206static struct snd_soc_card snd_soc_sdp3430 = { 272static struct snd_soc_card snd_soc_sdp3430 = {
207 .name = "SDP3430", 273 .name = "SDP3430",
208 .platform = &omap_soc_platform, 274 .platform = &omap_soc_platform,
209 .dai_link = &sdp3430_dai, 275 .dai_link = sdp3430_dai,
210 .num_links = 1, 276 .num_links = ARRAY_SIZE(sdp3430_dai),
277};
278
279/* twl4030 setup */
280static struct twl4030_setup_data twl4030_setup = {
281 .ramp_delay_value = 3,
282 .sysclk = 26000,
211}; 283};
212 284
213/* Audio subsystem */ 285/* Audio subsystem */
214static struct snd_soc_device sdp3430_snd_devdata = { 286static struct snd_soc_device sdp3430_snd_devdata = {
215 .card = &snd_soc_sdp3430, 287 .card = &snd_soc_sdp3430,
216 .codec_dev = &soc_codec_dev_twl4030, 288 .codec_dev = &soc_codec_dev_twl4030,
289 .codec_data = &twl4030_setup,
217}; 290};
218 291
219static struct platform_device *sdp3430_snd_device; 292static struct platform_device *sdp3430_snd_device;
@@ -236,7 +309,8 @@ static int __init sdp3430_soc_init(void)
236 309
237 platform_set_drvdata(sdp3430_snd_device, &sdp3430_snd_devdata); 310 platform_set_drvdata(sdp3430_snd_device, &sdp3430_snd_devdata);
238 sdp3430_snd_devdata.dev = &sdp3430_snd_device->dev; 311 sdp3430_snd_devdata.dev = &sdp3430_snd_device->dev;
239 *(unsigned int *)sdp3430_dai.cpu_dai->private_data = 1; /* McBSP2 */ 312 *(unsigned int *)sdp3430_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
313 *(unsigned int *)sdp3430_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
240 314
241 ret = platform_device_add(sdp3430_snd_device); 315 ret = platform_device_add(sdp3430_snd_device);
242 if (ret) 316 if (ret)
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index ad8a10fe6298..dcd163a4ee9a 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -89,13 +89,13 @@ config SND_PXA2XX_SOC_E800
89 Toshiba e800 PDA 89 Toshiba e800 PDA
90 90
91config SND_PXA2XX_SOC_EM_X270 91config SND_PXA2XX_SOC_EM_X270
92 tristate "SoC Audio support for CompuLab EM-x270" 92 tristate "SoC Audio support for CompuLab EM-x270, eXeda and CM-X300"
93 depends on SND_PXA2XX_SOC && MACH_EM_X270 93 depends on SND_PXA2XX_SOC && MACH_EM_X270
94 select SND_PXA2XX_SOC_AC97 94 select SND_PXA2XX_SOC_AC97
95 select SND_SOC_WM9712 95 select SND_SOC_WM9712
96 help 96 help
97 Say Y if you want to add support for SoC audio on 97 Say Y if you want to add support for SoC audio on
98 CompuLab EM-x270. 98 CompuLab EM-x270, eXeda and CM-X300 machines.
99 99
100config SND_PXA2XX_SOC_PALM27X 100config SND_PXA2XX_SOC_PALM27X
101 bool "SoC Audio support for Palm T|X, T5 and LifeDrive" 101 bool "SoC Audio support for Palm T|X, T5 and LifeDrive"
@@ -134,3 +134,12 @@ config SND_PXA2XX_SOC_MIOA701
134 help 134 help
135 Say Y if you want to add support for SoC audio on the 135 Say Y if you want to add support for SoC audio on the
136 MIO A701. 136 MIO A701.
137
138config SND_PXA2XX_SOC_IMOTE2
139 tristate "SoC Audio support for IMote 2"
140 depends on SND_PXA2XX_SOC && MACH_INTELMOTE2
141 select SND_PXA2XX_SOC_I2S
142 select SND_SOC_WM8940
143 help
144 Say Y if you want to add support for SoC audio on the
145 IMote 2.
diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile
index 4b90c3ccae45..6e096b480335 100644
--- a/sound/soc/pxa/Makefile
+++ b/sound/soc/pxa/Makefile
@@ -22,6 +22,7 @@ snd-soc-palm27x-objs := palm27x.o
22snd-soc-zylonite-objs := zylonite.o 22snd-soc-zylonite-objs := zylonite.o
23snd-soc-magician-objs := magician.o 23snd-soc-magician-objs := magician.o
24snd-soc-mioa701-objs := mioa701_wm9713.o 24snd-soc-mioa701-objs := mioa701_wm9713.o
25snd-soc-imote2-objs := imote2.o
25 26
26obj-$(CONFIG_SND_PXA2XX_SOC_CORGI) += snd-soc-corgi.o 27obj-$(CONFIG_SND_PXA2XX_SOC_CORGI) += snd-soc-corgi.o
27obj-$(CONFIG_SND_PXA2XX_SOC_POODLE) += snd-soc-poodle.o 28obj-$(CONFIG_SND_PXA2XX_SOC_POODLE) += snd-soc-poodle.o
@@ -35,3 +36,4 @@ obj-$(CONFIG_SND_PXA2XX_SOC_PALM27X) += snd-soc-palm27x.o
35obj-$(CONFIG_SND_PXA2XX_SOC_MAGICIAN) += snd-soc-magician.o 36obj-$(CONFIG_SND_PXA2XX_SOC_MAGICIAN) += snd-soc-magician.o
36obj-$(CONFIG_SND_PXA2XX_SOC_MIOA701) += snd-soc-mioa701.o 37obj-$(CONFIG_SND_PXA2XX_SOC_MIOA701) += snd-soc-mioa701.o
37obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o 38obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o
39obj-$(CONFIG_SND_PXA2XX_SOC_IMOTE2) += snd-soc-imote2.o
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index 949be9c2a01b..f4756e4025fd 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * em-x270.c -- SoC audio for EM-X270 2 * SoC audio driver for EM-X270, eXeda and CM-X300
3 * 3 *
4 * Copyright 2007 CompuLab, Ltd. 4 * Copyright 2007, 2009 CompuLab, Ltd.
5 * 5 *
6 * Author: Mike Rapoport <mike@compulab.co.il> 6 * Author: Mike Rapoport <mike@compulab.co.il>
7 * 7 *
@@ -68,7 +68,8 @@ static int __init em_x270_init(void)
68{ 68{
69 int ret; 69 int ret;
70 70
71 if (!machine_is_em_x270()) 71 if (!(machine_is_em_x270() || machine_is_exeda()
72 || machine_is_cm_x300()))
72 return -ENODEV; 73 return -ENODEV;
73 74
74 em_x270_snd_device = platform_device_alloc("soc-audio", -1); 75 em_x270_snd_device = platform_device_alloc("soc-audio", -1);
@@ -95,5 +96,5 @@ module_exit(em_x270_exit);
95 96
96/* Module information */ 97/* Module information */
97MODULE_AUTHOR("Mike Rapoport"); 98MODULE_AUTHOR("Mike Rapoport");
98MODULE_DESCRIPTION("ALSA SoC EM-X270"); 99MODULE_DESCRIPTION("ALSA SoC EM-X270, eXeda and CM-X300");
99MODULE_LICENSE("GPL"); 100MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
new file mode 100644
index 000000000000..405587a01160
--- /dev/null
+++ b/sound/soc/pxa/imote2.c
@@ -0,0 +1,114 @@
1
2#include <linux/module.h>
3#include <sound/soc.h>
4
5#include <asm/mach-types.h>
6
7#include "../codecs/wm8940.h"
8#include "pxa2xx-i2s.h"
9#include "pxa2xx-pcm.h"
10
11static int imote2_asoc_hw_params(struct snd_pcm_substream *substream,
12 struct snd_pcm_hw_params *params)
13{
14 struct snd_soc_pcm_runtime *rtd = substream->private_data;
15 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
16 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
17 unsigned int clk = 0;
18 int ret;
19
20 switch (params_rate(params)) {
21 case 8000:
22 case 16000:
23 case 48000:
24 case 96000:
25 clk = 12288000;
26 break;
27 case 11025:
28 case 22050:
29 case 44100:
30 clk = 11289600;
31 break;
32 }
33
34 /* set codec DAI configuration */
35 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
36 | SND_SOC_DAIFMT_NB_NF
37 | SND_SOC_DAIFMT_CBS_CFS);
38 if (ret < 0)
39 return ret;
40
41 /* CPU should be clock master */
42 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
43 | SND_SOC_DAIFMT_NB_NF
44 | SND_SOC_DAIFMT_CBS_CFS);
45 if (ret < 0)
46 return ret;
47
48 ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk,
49 SND_SOC_CLOCK_IN);
50 if (ret < 0)
51 return ret;
52
53 /* set the I2S system clock as input (unused) */
54 ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, clk,
55 SND_SOC_CLOCK_OUT);
56
57 return ret;
58}
59
60static struct snd_soc_ops imote2_asoc_ops = {
61 .hw_params = imote2_asoc_hw_params,
62};
63
64static struct snd_soc_dai_link imote2_dai = {
65 .name = "WM8940",
66 .stream_name = "WM8940",
67 .cpu_dai = &pxa_i2s_dai,
68 .codec_dai = &wm8940_dai,
69 .ops = &imote2_asoc_ops,
70};
71
72static struct snd_soc_card snd_soc_imote2 = {
73 .name = "Imote2",
74 .platform = &pxa2xx_soc_platform,
75 .dai_link = &imote2_dai,
76 .num_links = 1,
77};
78
79static struct snd_soc_device imote2_snd_devdata = {
80 .card = &snd_soc_imote2,
81 .codec_dev = &soc_codec_dev_wm8940,
82};
83
84static struct platform_device *imote2_snd_device;
85
86static int __init imote2_asoc_init(void)
87{
88 int ret;
89
90 if (!machine_is_intelmote2())
91 return -ENODEV;
92 imote2_snd_device = platform_device_alloc("soc-audio", -1);
93 if (!imote2_snd_device)
94 return -ENOMEM;
95
96 platform_set_drvdata(imote2_snd_device, &imote2_snd_devdata);
97 imote2_snd_devdata.dev = &imote2_snd_device->dev;
98 ret = platform_device_add(imote2_snd_device);
99 if (ret)
100 platform_device_put(imote2_snd_device);
101
102 return ret;
103}
104module_init(imote2_asoc_init);
105
106static void __exit imote2_asoc_exit(void)
107{
108 platform_device_unregister(imote2_snd_device);
109}
110module_exit(imote2_asoc_exit);
111
112MODULE_AUTHOR("Jonathan Cameron");
113MODULE_DESCRIPTION("ALSA SoC Imote 2");
114MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 0625c342a1c9..c89a3cdf31e4 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -106,7 +106,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
106 /* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */ 106 /* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */
107 acds = PXA_SSP_CLK_AUDIO_DIV_16; 107 acds = PXA_SSP_CLK_AUDIO_DIV_16;
108 break; 108 break;
109 case 32: 109 default: /* 32 */
110 /* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */ 110 /* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */
111 acds = PXA_SSP_CLK_AUDIO_DIV_8; 111 acds = PXA_SSP_CLK_AUDIO_DIV_8;
112 } 112 }
@@ -118,7 +118,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
118 /* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */ 118 /* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */
119 acds = PXA_SSP_CLK_AUDIO_DIV_4; 119 acds = PXA_SSP_CLK_AUDIO_DIV_4;
120 break; 120 break;
121 case 32: 121 default: /* 32 */
122 /* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */ 122 /* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */
123 acds = PXA_SSP_CLK_AUDIO_DIV_2; 123 acds = PXA_SSP_CLK_AUDIO_DIV_2;
124 } 124 }
@@ -130,7 +130,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
130 /* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */ 130 /* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */
131 acds = PXA_SSP_CLK_AUDIO_DIV_2; 131 acds = PXA_SSP_CLK_AUDIO_DIV_2;
132 break; 132 break;
133 case 32: 133 default: /* 32 */
134 /* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */ 134 /* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */
135 acds = PXA_SSP_CLK_AUDIO_DIV_1; 135 acds = PXA_SSP_CLK_AUDIO_DIV_1;
136 } 136 }
@@ -142,7 +142,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
142 /* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */ 142 /* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */
143 acds = PXA_SSP_CLK_AUDIO_DIV_2; 143 acds = PXA_SSP_CLK_AUDIO_DIV_2;
144 break; 144 break;
145 case 32: 145 default: /* 32 */
146 /* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */ 146 /* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */
147 acds = PXA_SSP_CLK_AUDIO_DIV_1; 147 acds = PXA_SSP_CLK_AUDIO_DIV_1;
148 } 148 }
@@ -154,19 +154,20 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
154 /* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */ 154 /* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */
155 acds = PXA_SSP_CLK_AUDIO_DIV_2; 155 acds = PXA_SSP_CLK_AUDIO_DIV_2;
156 break; 156 break;
157 case 32: 157 default: /* 32 */
158 /* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */ 158 /* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */
159 acds = PXA_SSP_CLK_AUDIO_DIV_1; 159 acds = PXA_SSP_CLK_AUDIO_DIV_1;
160 } 160 }
161 break; 161 break;
162 case 96000: 162 case 96000:
163 default:
163 acps = 12235000; 164 acps = 12235000;
164 switch (width) { 165 switch (width) {
165 case 16: 166 case 16:
166 /* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */ 167 /* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */
167 acds = PXA_SSP_CLK_AUDIO_DIV_1; 168 acds = PXA_SSP_CLK_AUDIO_DIV_1;
168 break; 169 break;
169 case 32: 170 default: /* 32 */
170 /* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */ 171 /* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */
171 acds = PXA_SSP_CLK_AUDIO_DIV_2; 172 acds = PXA_SSP_CLK_AUDIO_DIV_2;
172 div4 = PXA_SSP_CLK_SCDB_1; 173 div4 = PXA_SSP_CLK_SCDB_1;
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 286be31545df..19c45409d94c 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -50,139 +50,6 @@ struct ssp_priv {
50#endif 50#endif
51}; 51};
52 52
53#define PXA2xx_SSP1_BASE 0x41000000
54#define PXA27x_SSP2_BASE 0x41700000
55#define PXA27x_SSP3_BASE 0x41900000
56#define PXA3xx_SSP4_BASE 0x41a00000
57
58static struct pxa2xx_pcm_dma_params pxa_ssp1_pcm_mono_out = {
59 .name = "SSP1 PCM Mono out",
60 .dev_addr = PXA2xx_SSP1_BASE + SSDR,
61 .drcmr = &DRCMR(14),
62 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
63 DCMD_BURST16 | DCMD_WIDTH2,
64};
65
66static struct pxa2xx_pcm_dma_params pxa_ssp1_pcm_mono_in = {
67 .name = "SSP1 PCM Mono in",
68 .dev_addr = PXA2xx_SSP1_BASE + SSDR,
69 .drcmr = &DRCMR(13),
70 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
71 DCMD_BURST16 | DCMD_WIDTH2,
72};
73
74static struct pxa2xx_pcm_dma_params pxa_ssp1_pcm_stereo_out = {
75 .name = "SSP1 PCM Stereo out",
76 .dev_addr = PXA2xx_SSP1_BASE + SSDR,
77 .drcmr = &DRCMR(14),
78 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
79 DCMD_BURST16 | DCMD_WIDTH4,
80};
81
82static struct pxa2xx_pcm_dma_params pxa_ssp1_pcm_stereo_in = {
83 .name = "SSP1 PCM Stereo in",
84 .dev_addr = PXA2xx_SSP1_BASE + SSDR,
85 .drcmr = &DRCMR(13),
86 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
87 DCMD_BURST16 | DCMD_WIDTH4,
88};
89
90static struct pxa2xx_pcm_dma_params pxa_ssp2_pcm_mono_out = {
91 .name = "SSP2 PCM Mono out",
92 .dev_addr = PXA27x_SSP2_BASE + SSDR,
93 .drcmr = &DRCMR(16),
94 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
95 DCMD_BURST16 | DCMD_WIDTH2,
96};
97
98static struct pxa2xx_pcm_dma_params pxa_ssp2_pcm_mono_in = {
99 .name = "SSP2 PCM Mono in",
100 .dev_addr = PXA27x_SSP2_BASE + SSDR,
101 .drcmr = &DRCMR(15),
102 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
103 DCMD_BURST16 | DCMD_WIDTH2,
104};
105
106static struct pxa2xx_pcm_dma_params pxa_ssp2_pcm_stereo_out = {
107 .name = "SSP2 PCM Stereo out",
108 .dev_addr = PXA27x_SSP2_BASE + SSDR,
109 .drcmr = &DRCMR(16),
110 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
111 DCMD_BURST16 | DCMD_WIDTH4,
112};
113
114static struct pxa2xx_pcm_dma_params pxa_ssp2_pcm_stereo_in = {
115 .name = "SSP2 PCM Stereo in",
116 .dev_addr = PXA27x_SSP2_BASE + SSDR,
117 .drcmr = &DRCMR(15),
118 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
119 DCMD_BURST16 | DCMD_WIDTH4,
120};
121
122static struct pxa2xx_pcm_dma_params pxa_ssp3_pcm_mono_out = {
123 .name = "SSP3 PCM Mono out",
124 .dev_addr = PXA27x_SSP3_BASE + SSDR,
125 .drcmr = &DRCMR(67),
126 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
127 DCMD_BURST16 | DCMD_WIDTH2,
128};
129
130static struct pxa2xx_pcm_dma_params pxa_ssp3_pcm_mono_in = {
131 .name = "SSP3 PCM Mono in",
132 .dev_addr = PXA27x_SSP3_BASE + SSDR,
133 .drcmr = &DRCMR(66),
134 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
135 DCMD_BURST16 | DCMD_WIDTH2,
136};
137
138static struct pxa2xx_pcm_dma_params pxa_ssp3_pcm_stereo_out = {
139 .name = "SSP3 PCM Stereo out",
140 .dev_addr = PXA27x_SSP3_BASE + SSDR,
141 .drcmr = &DRCMR(67),
142 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
143 DCMD_BURST16 | DCMD_WIDTH4,
144};
145
146static struct pxa2xx_pcm_dma_params pxa_ssp3_pcm_stereo_in = {
147 .name = "SSP3 PCM Stereo in",
148 .dev_addr = PXA27x_SSP3_BASE + SSDR,
149 .drcmr = &DRCMR(66),
150 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
151 DCMD_BURST16 | DCMD_WIDTH4,
152};
153
154static struct pxa2xx_pcm_dma_params pxa_ssp4_pcm_mono_out = {
155 .name = "SSP4 PCM Mono out",
156 .dev_addr = PXA3xx_SSP4_BASE + SSDR,
157 .drcmr = &DRCMR(67),
158 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
159 DCMD_BURST16 | DCMD_WIDTH2,
160};
161
162static struct pxa2xx_pcm_dma_params pxa_ssp4_pcm_mono_in = {
163 .name = "SSP4 PCM Mono in",
164 .dev_addr = PXA3xx_SSP4_BASE + SSDR,
165 .drcmr = &DRCMR(66),
166 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
167 DCMD_BURST16 | DCMD_WIDTH2,
168};
169
170static struct pxa2xx_pcm_dma_params pxa_ssp4_pcm_stereo_out = {
171 .name = "SSP4 PCM Stereo out",
172 .dev_addr = PXA3xx_SSP4_BASE + SSDR,
173 .drcmr = &DRCMR(67),
174 .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
175 DCMD_BURST16 | DCMD_WIDTH4,
176};
177
178static struct pxa2xx_pcm_dma_params pxa_ssp4_pcm_stereo_in = {
179 .name = "SSP4 PCM Stereo in",
180 .dev_addr = PXA3xx_SSP4_BASE + SSDR,
181 .drcmr = &DRCMR(66),
182 .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
183 DCMD_BURST16 | DCMD_WIDTH4,
184};
185
186static void dump_registers(struct ssp_device *ssp) 53static void dump_registers(struct ssp_device *ssp)
187{ 54{
188 dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n", 55 dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n",
@@ -194,25 +61,33 @@ static void dump_registers(struct ssp_device *ssp)
194 ssp_read_reg(ssp, SSACD)); 61 ssp_read_reg(ssp, SSACD));
195} 62}
196 63
197static struct pxa2xx_pcm_dma_params *ssp_dma_params[4][4] = { 64struct pxa2xx_pcm_dma_data {
198 { 65 struct pxa2xx_pcm_dma_params params;
199 &pxa_ssp1_pcm_mono_out, &pxa_ssp1_pcm_mono_in, 66 char name[20];
200 &pxa_ssp1_pcm_stereo_out, &pxa_ssp1_pcm_stereo_in,
201 },
202 {
203 &pxa_ssp2_pcm_mono_out, &pxa_ssp2_pcm_mono_in,
204 &pxa_ssp2_pcm_stereo_out, &pxa_ssp2_pcm_stereo_in,
205 },
206 {
207 &pxa_ssp3_pcm_mono_out, &pxa_ssp3_pcm_mono_in,
208 &pxa_ssp3_pcm_stereo_out, &pxa_ssp3_pcm_stereo_in,
209 },
210 {
211 &pxa_ssp4_pcm_mono_out, &pxa_ssp4_pcm_mono_in,
212 &pxa_ssp4_pcm_stereo_out, &pxa_ssp4_pcm_stereo_in,
213 },
214}; 67};
215 68
69static struct pxa2xx_pcm_dma_params *
70ssp_get_dma_params(struct ssp_device *ssp, int width4, int out)
71{
72 struct pxa2xx_pcm_dma_data *dma;
73
74 dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL);
75 if (dma == NULL)
76 return NULL;
77
78 snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id,
79 width4 ? "32-bit" : "16-bit", out ? "out" : "in");
80
81 dma->params.name = dma->name;
82 dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx);
83 dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) :
84 (DCMD_INCTRGADDR | DCMD_FLOWSRC)) |
85 (width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16;
86 dma->params.dev_addr = ssp->phys_base + SSDR;
87
88 return &dma->params;
89}
90
216static int pxa_ssp_startup(struct snd_pcm_substream *substream, 91static int pxa_ssp_startup(struct snd_pcm_substream *substream,
217 struct snd_soc_dai *dai) 92 struct snd_soc_dai *dai)
218{ 93{
@@ -227,6 +102,11 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
227 clk_enable(priv->dev.ssp->clk); 102 clk_enable(priv->dev.ssp->clk);
228 ssp_disable(&priv->dev); 103 ssp_disable(&priv->dev);
229 } 104 }
105
106 if (cpu_dai->dma_data) {
107 kfree(cpu_dai->dma_data);
108 cpu_dai->dma_data = NULL;
109 }
230 return ret; 110 return ret;
231} 111}
232 112
@@ -241,6 +121,11 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
241 ssp_disable(&priv->dev); 121 ssp_disable(&priv->dev);
242 clk_disable(priv->dev.ssp->clk); 122 clk_disable(priv->dev.ssp->clk);
243 } 123 }
124
125 if (cpu_dai->dma_data) {
126 kfree(cpu_dai->dma_data);
127 cpu_dai->dma_data = NULL;
128 }
244} 129}
245 130
246#ifdef CONFIG_PM 131#ifdef CONFIG_PM
@@ -323,7 +208,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
323 ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); 208 ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS);
324 209
325 dev_dbg(&ssp->pdev->dev, 210 dev_dbg(&ssp->pdev->dev,
326 "pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %d\n", 211 "pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n",
327 cpu_dai->id, clk_id, freq); 212 cpu_dai->id, clk_id, freq);
328 213
329 switch (clk_id) { 214 switch (clk_id) {
@@ -472,7 +357,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai,
472 ssacd |= (0x6 << 4); 357 ssacd |= (0x6 << 4);
473 358
474 dev_dbg(&ssp->pdev->dev, 359 dev_dbg(&ssp->pdev->dev,
475 "Using SSACDD %x to supply %dHz\n", 360 "Using SSACDD %x to supply %uHz\n",
476 val, freq_out); 361 val, freq_out);
477 break; 362 break;
478 } 363 }
@@ -589,7 +474,10 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
589 case SND_SOC_DAIFMT_NB_IF: 474 case SND_SOC_DAIFMT_NB_IF:
590 break; 475 break;
591 case SND_SOC_DAIFMT_IB_IF: 476 case SND_SOC_DAIFMT_IB_IF:
592 sspsp |= SSPSP_SCMODE(3); 477 sspsp |= SSPSP_SCMODE(2);
478 break;
479 case SND_SOC_DAIFMT_IB_NF:
480 sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
593 break; 481 break;
594 default: 482 default:
595 return -EINVAL; 483 return -EINVAL;
@@ -606,7 +494,13 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
606 case SND_SOC_DAIFMT_NB_NF: 494 case SND_SOC_DAIFMT_NB_NF:
607 sspsp |= SSPSP_SFRMP; 495 sspsp |= SSPSP_SFRMP;
608 break; 496 break;
497 case SND_SOC_DAIFMT_NB_IF:
498 break;
609 case SND_SOC_DAIFMT_IB_IF: 499 case SND_SOC_DAIFMT_IB_IF:
500 sspsp |= SSPSP_SCMODE(2);
501 break;
502 case SND_SOC_DAIFMT_IB_NF:
503 sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
610 break; 504 break;
611 default: 505 default:
612 return -EINVAL; 506 return -EINVAL;
@@ -644,25 +538,23 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
644 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 538 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
645 struct ssp_priv *priv = cpu_dai->private_data; 539 struct ssp_priv *priv = cpu_dai->private_data;
646 struct ssp_device *ssp = priv->dev.ssp; 540 struct ssp_device *ssp = priv->dev.ssp;
647 int dma = 0, chn = params_channels(params); 541 int chn = params_channels(params);
648 u32 sscr0; 542 u32 sscr0;
649 u32 sspsp; 543 u32 sspsp;
650 int width = snd_pcm_format_physical_width(params_format(params)); 544 int width = snd_pcm_format_physical_width(params_format(params));
651 int ttsa = ssp_read_reg(ssp, SSTSA) & 0xf; 545 int ttsa = ssp_read_reg(ssp, SSTSA) & 0xf;
652 546
653 /* select correct DMA params */ 547 /* generate correct DMA params */
654 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) 548 if (cpu_dai->dma_data)
655 dma = 1; /* capture DMA offset is 1,3 */ 549 kfree(cpu_dai->dma_data);
550
656 /* Network mode with one active slot (ttsa == 1) can be used 551 /* Network mode with one active slot (ttsa == 1) can be used
657 * to force 16-bit frame width on the wire (for S16_LE), even 552 * to force 16-bit frame width on the wire (for S16_LE), even
658 * with two channels. Use 16-bit DMA transfers for this case. 553 * with two channels. Use 16-bit DMA transfers for this case.
659 */ 554 */
660 if (((chn == 2) && (ttsa != 1)) || (width == 32)) 555 cpu_dai->dma_data = ssp_get_dma_params(ssp,
661 dma += 2; /* 32-bit DMA offset is 2, 16-bit is 0 */ 556 ((chn == 2) && (ttsa != 1)) || (width == 32),
662 557 substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
663 cpu_dai->dma_data = ssp_dma_params[cpu_dai->id][dma];
664
665 dev_dbg(&ssp->pdev->dev, "pxa_ssp_hw_params: dma %d\n", dma);
666 558
667 /* we can only change the settings if the port is not in use */ 559 /* we can only change the settings if the port is not in use */
668 if (ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) 560 if (ssp_read_reg(ssp, SSCR0) & SSCR0_SSE)
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 2f4b6e489b78..4743e262895d 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -106,10 +106,8 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
106 if (IS_ERR(clk_i2s)) 106 if (IS_ERR(clk_i2s))
107 return PTR_ERR(clk_i2s); 107 return PTR_ERR(clk_i2s);
108 108
109 if (!cpu_dai->active) { 109 if (!cpu_dai->active)
110 SACR0 |= SACR0_RST;
111 SACR0 = 0; 110 SACR0 = 0;
112 }
113 111
114 return 0; 112 return 0;
115} 113}
@@ -178,9 +176,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
178 176
179 /* is port used by another stream */ 177 /* is port used by another stream */
180 if (!(SACR0 & SACR0_ENB)) { 178 if (!(SACR0 & SACR0_ENB)) {
181
182 SACR0 = 0; 179 SACR0 = 0;
183 SACR1 = 0;
184 if (pxa_i2s.master) 180 if (pxa_i2s.master)
185 SACR0 |= SACR0_BCKD; 181 SACR0 |= SACR0_BCKD;
186 182
@@ -226,6 +222,10 @@ static int pxa2xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
226 222
227 switch (cmd) { 223 switch (cmd) {
228 case SNDRV_PCM_TRIGGER_START: 224 case SNDRV_PCM_TRIGGER_START:
225 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
226 SACR1 &= ~SACR1_DRPL;
227 else
228 SACR1 &= ~SACR1_DREC;
229 SACR0 |= SACR0_ENB; 229 SACR0 |= SACR0_ENB;
230 break; 230 break;
231 case SNDRV_PCM_TRIGGER_RESUME: 231 case SNDRV_PCM_TRIGGER_RESUME:
@@ -252,21 +252,16 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
252 SAIMR &= ~SAIMR_RFS; 252 SAIMR &= ~SAIMR_RFS;
253 } 253 }
254 254
255 if (SACR1 & (SACR1_DREC | SACR1_DRPL)) { 255 if ((SACR1 & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
256 SACR0 &= ~SACR0_ENB; 256 SACR0 &= ~SACR0_ENB;
257 pxa_i2s_wait(); 257 pxa_i2s_wait();
258 clk_disable(clk_i2s); 258 clk_disable(clk_i2s);
259 } 259 }
260
261 clk_put(clk_i2s);
262} 260}
263 261
264#ifdef CONFIG_PM 262#ifdef CONFIG_PM
265static int pxa2xx_i2s_suspend(struct snd_soc_dai *dai) 263static int pxa2xx_i2s_suspend(struct snd_soc_dai *dai)
266{ 264{
267 if (!dai->active)
268 return 0;
269
270 /* store registers */ 265 /* store registers */
271 pxa_i2s.sacr0 = SACR0; 266 pxa_i2s.sacr0 = SACR0;
272 pxa_i2s.sacr1 = SACR1; 267 pxa_i2s.sacr1 = SACR1;
@@ -281,16 +276,14 @@ static int pxa2xx_i2s_suspend(struct snd_soc_dai *dai)
281 276
282static int pxa2xx_i2s_resume(struct snd_soc_dai *dai) 277static int pxa2xx_i2s_resume(struct snd_soc_dai *dai)
283{ 278{
284 if (!dai->active)
285 return 0;
286
287 pxa_i2s_wait(); 279 pxa_i2s_wait();
288 280
289 SACR0 = pxa_i2s.sacr0 &= ~SACR0_ENB; 281 SACR0 = pxa_i2s.sacr0 & ~SACR0_ENB;
290 SACR1 = pxa_i2s.sacr1; 282 SACR1 = pxa_i2s.sacr1;
291 SAIMR = pxa_i2s.saimr; 283 SAIMR = pxa_i2s.saimr;
292 SADIV = pxa_i2s.sadiv; 284 SADIV = pxa_i2s.sadiv;
293 SACR0 |= SACR0_ENB; 285
286 SACR0 = pxa_i2s.sacr0;
294 287
295 return 0; 288 return 0;
296} 289}
@@ -329,6 +322,7 @@ struct snd_soc_dai pxa_i2s_dai = {
329 .rates = PXA2XX_I2S_RATES, 322 .rates = PXA2XX_I2S_RATES,
330 .formats = SNDRV_PCM_FMTBIT_S16_LE,}, 323 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
331 .ops = &pxa_i2s_dai_ops, 324 .ops = &pxa_i2s_dai_ops,
325 .symmetric_rates = 1,
332}; 326};
333 327
334EXPORT_SYMBOL_GPL(pxa_i2s_dai); 328EXPORT_SYMBOL_GPL(pxa_i2s_dai);
@@ -346,6 +340,19 @@ static int pxa2xx_i2s_probe(struct platform_device *dev)
346 if (ret != 0) 340 if (ret != 0)
347 clk_put(clk_i2s); 341 clk_put(clk_i2s);
348 342
343 /*
344 * PXA Developer's Manual:
345 * If SACR0[ENB] is toggled in the middle of a normal operation,
346 * the SACR0[RST] bit must also be set and cleared to reset all
347 * I2S controller registers.
348 */
349 SACR0 = SACR0_RST;
350 SACR0 = 0;
351 /* Make sure RPL and REC are disabled */
352 SACR1 = SACR1_DRPL | SACR1_DREC;
353 /* Along with FIFO servicing */
354 SAIMR &= ~(SAIMR_RFS | SAIMR_TFS);
355
349 return ret; 356 return ret;
350} 357}
351 358
diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/s3c24xx/neo1973_wm8753.c
index 289fadf60b10..906709e6dd5f 100644
--- a/sound/soc/s3c24xx/neo1973_wm8753.c
+++ b/sound/soc/s3c24xx/neo1973_wm8753.c
@@ -345,9 +345,11 @@ static void lm4857_write_regs(void)
345static int lm4857_get_reg(struct snd_kcontrol *kcontrol, 345static int lm4857_get_reg(struct snd_kcontrol *kcontrol,
346 struct snd_ctl_elem_value *ucontrol) 346 struct snd_ctl_elem_value *ucontrol)
347{ 347{
348 int reg = kcontrol->private_value & 0xFF; 348 struct soc_mixer_control *mc =
349 int shift = (kcontrol->private_value >> 8) & 0x0F; 349 (struct soc_mixer_control *)kcontrol->private_value;
350 int mask = (kcontrol->private_value >> 16) & 0xFF; 350 int reg = mc->reg;
351 int shift = mc->shift;
352 int mask = mc->max;
351 353
352 pr_debug("Entered %s\n", __func__); 354 pr_debug("Entered %s\n", __func__);
353 355
@@ -358,9 +360,11 @@ static int lm4857_get_reg(struct snd_kcontrol *kcontrol,
358static int lm4857_set_reg(struct snd_kcontrol *kcontrol, 360static int lm4857_set_reg(struct snd_kcontrol *kcontrol,
359 struct snd_ctl_elem_value *ucontrol) 361 struct snd_ctl_elem_value *ucontrol)
360{ 362{
361 int reg = kcontrol->private_value & 0xFF; 363 struct soc_mixer_control *mc =
362 int shift = (kcontrol->private_value >> 8) & 0x0F; 364 (struct soc_mixer_control *)kcontrol->private_value;
363 int mask = (kcontrol->private_value >> 16) & 0xFF; 365 int reg = mc->reg;
366 int shift = mc->shift;
367 int mask = mc->max;
364 368
365 if (((lm4857_regs[reg] >> shift) & mask) == 369 if (((lm4857_regs[reg] >> shift) & mask) ==
366 ucontrol->value.integer.value[0]) 370 ucontrol->value.integer.value[0])
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c
index ab680aac3fcb..1a283170ca92 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/s3c24xx/s3c-i2s-v2.c
@@ -37,6 +37,20 @@
37 37
38#include "s3c-i2s-v2.h" 38#include "s3c-i2s-v2.h"
39 39
40#undef S3C_IIS_V2_SUPPORTED
41
42#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
43#define S3C_IIS_V2_SUPPORTED
44#endif
45
46#ifdef CONFIG_PLAT_S3C64XX
47#define S3C_IIS_V2_SUPPORTED
48#endif
49
50#ifndef S3C_IIS_V2_SUPPORTED
51#error Unsupported CPU model
52#endif
53
40#define S3C2412_I2S_DEBUG_CON 0 54#define S3C2412_I2S_DEBUG_CON 0
41 55
42static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai) 56static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
@@ -75,7 +89,7 @@ static inline void dbg_showcon(const char *fn, u32 con)
75 89
76 90
77/* Turn on or off the transmission path. */ 91/* Turn on or off the transmission path. */
78void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on) 92static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
79{ 93{
80 void __iomem *regs = i2s->regs; 94 void __iomem *regs = i2s->regs;
81 u32 fic, con, mod; 95 u32 fic, con, mod;
@@ -105,7 +119,9 @@ void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
105 break; 119 break;
106 120
107 default: 121 default:
108 dev_err(i2s->dev, "TXEN: Invalid MODE in IISMOD\n"); 122 dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n",
123 mod & S3C2412_IISMOD_MODE_MASK);
124 break;
109 } 125 }
110 126
111 writel(con, regs + S3C2412_IISCON); 127 writel(con, regs + S3C2412_IISCON);
@@ -132,7 +148,9 @@ void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
132 break; 148 break;
133 149
134 default: 150 default:
135 dev_err(i2s->dev, "TXDIS: Invalid MODE in IISMOD\n"); 151 dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n",
152 mod & S3C2412_IISMOD_MODE_MASK);
153 break;
136 } 154 }
137 155
138 writel(mod, regs + S3C2412_IISMOD); 156 writel(mod, regs + S3C2412_IISMOD);
@@ -143,9 +161,8 @@ void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
143 dbg_showcon(__func__, con); 161 dbg_showcon(__func__, con);
144 pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); 162 pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
145} 163}
146EXPORT_SYMBOL_GPL(s3c2412_snd_txctrl);
147 164
148void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on) 165static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
149{ 166{
150 void __iomem *regs = i2s->regs; 167 void __iomem *regs = i2s->regs;
151 u32 fic, con, mod; 168 u32 fic, con, mod;
@@ -175,7 +192,8 @@ void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
175 break; 192 break;
176 193
177 default: 194 default:
178 dev_err(i2s->dev, "RXEN: Invalid MODE in IISMOD\n"); 195 dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n",
196 mod & S3C2412_IISMOD_MODE_MASK);
179 } 197 }
180 198
181 writel(mod, regs + S3C2412_IISMOD); 199 writel(mod, regs + S3C2412_IISMOD);
@@ -199,7 +217,8 @@ void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
199 break; 217 break;
200 218
201 default: 219 default:
202 dev_err(i2s->dev, "RXEN: Invalid MODE in IISMOD\n"); 220 dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n",
221 mod & S3C2412_IISMOD_MODE_MASK);
203 } 222 }
204 223
205 writel(con, regs + S3C2412_IISCON); 224 writel(con, regs + S3C2412_IISCON);
@@ -209,7 +228,6 @@ void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
209 fic = readl(regs + S3C2412_IISFIC); 228 fic = readl(regs + S3C2412_IISFIC);
210 pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); 229 pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
211} 230}
212EXPORT_SYMBOL_GPL(s3c2412_snd_rxctrl);
213 231
214/* 232/*
215 * Wait for the LR signal to allow synchronisation to the L/R clock 233 * Wait for the LR signal to allow synchronisation to the L/R clock
@@ -266,7 +284,7 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
266 */ 284 */
267#define IISMOD_MASTER_MASK (1 << 11) 285#define IISMOD_MASTER_MASK (1 << 11)
268#define IISMOD_SLAVE (1 << 11) 286#define IISMOD_SLAVE (1 << 11)
269#define IISMOD_MASTER (0x0) 287#define IISMOD_MASTER (0 << 11)
270#endif 288#endif
271 289
272 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 290 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -281,7 +299,7 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
281 iismod |= IISMOD_MASTER; 299 iismod |= IISMOD_MASTER;
282 break; 300 break;
283 default: 301 default:
284 pr_debug("unknwon master/slave format\n"); 302 pr_err("unknwon master/slave format\n");
285 return -EINVAL; 303 return -EINVAL;
286 } 304 }
287 305
@@ -298,7 +316,7 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
298 iismod |= S3C2412_IISMOD_SDF_IIS; 316 iismod |= S3C2412_IISMOD_SDF_IIS;
299 break; 317 break;
300 default: 318 default:
301 pr_debug("Unknown data format\n"); 319 pr_err("Unknown data format\n");
302 return -EINVAL; 320 return -EINVAL;
303 } 321 }
304 322
@@ -327,6 +345,7 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
327 iismod = readl(i2s->regs + S3C2412_IISMOD); 345 iismod = readl(i2s->regs + S3C2412_IISMOD);
328 pr_debug("%s: r: IISMOD: %x\n", __func__, iismod); 346 pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
329 347
348#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
330 switch (params_format(params)) { 349 switch (params_format(params)) {
331 case SNDRV_PCM_FORMAT_S8: 350 case SNDRV_PCM_FORMAT_S8:
332 iismod |= S3C2412_IISMOD_8BIT; 351 iismod |= S3C2412_IISMOD_8BIT;
@@ -335,6 +354,25 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
335 iismod &= ~S3C2412_IISMOD_8BIT; 354 iismod &= ~S3C2412_IISMOD_8BIT;
336 break; 355 break;
337 } 356 }
357#endif
358
359#ifdef CONFIG_PLAT_S3C64XX
360 iismod &= ~0x606;
361 /* Sample size */
362 switch (params_format(params)) {
363 case SNDRV_PCM_FORMAT_S8:
364 /* 8 bit sample, 16fs BCLK */
365 iismod |= 0x2004;
366 break;
367 case SNDRV_PCM_FORMAT_S16_LE:
368 /* 16 bit sample, 32fs BCLK */
369 break;
370 case SNDRV_PCM_FORMAT_S24_LE:
371 /* 24 bit sample, 48fs BCLK */
372 iismod |= 0x4002;
373 break;
374 }
375#endif
338 376
339 writel(iismod, i2s->regs + S3C2412_IISMOD); 377 writel(iismod, i2s->regs + S3C2412_IISMOD);
340 pr_debug("%s: w: IISMOD: %x\n", __func__, iismod); 378 pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
@@ -489,6 +527,8 @@ int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
489 unsigned int best_rate = 0; 527 unsigned int best_rate = 0;
490 unsigned int best_deviation = INT_MAX; 528 unsigned int best_deviation = INT_MAX;
491 529
530 pr_debug("Input clock rate %ldHz\n", clkrate);
531
492 if (fstab == NULL) 532 if (fstab == NULL)
493 fstab = iis_fs_tab; 533 fstab = iis_fs_tab;
494 534
@@ -507,7 +547,7 @@ int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
507 actual = clkrate / (fsdiv * div); 547 actual = clkrate / (fsdiv * div);
508 deviation = actual - rate; 548 deviation = actual - rate;
509 549
510 printk(KERN_DEBUG "%dfs: div %d => result %d, deviation %d\n", 550 printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n",
511 fsdiv, div, actual, deviation); 551 fsdiv, div, actual, deviation);
512 552
513 deviation = abs(deviation); 553 deviation = abs(deviation);
@@ -523,7 +563,7 @@ int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
523 break; 563 break;
524 } 564 }
525 565
526 printk(KERN_DEBUG "best: fs=%d, div=%d, rate=%d\n", 566 printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n",
527 best_fs, best_div, best_rate); 567 best_fs, best_div, best_rate);
528 568
529 info->fs_div = best_fs; 569 info->fs_div = best_fs;
@@ -539,12 +579,31 @@ int s3c_i2sv2_probe(struct platform_device *pdev,
539 unsigned long base) 579 unsigned long base)
540{ 580{
541 struct device *dev = &pdev->dev; 581 struct device *dev = &pdev->dev;
582 unsigned int iismod;
542 583
543 i2s->dev = dev; 584 i2s->dev = dev;
544 585
545 /* record our i2s structure for later use in the callbacks */ 586 /* record our i2s structure for later use in the callbacks */
546 dai->private_data = i2s; 587 dai->private_data = i2s;
547 588
589 if (!base) {
590 struct resource *res = platform_get_resource(pdev,
591 IORESOURCE_MEM,
592 0);
593 if (!res) {
594 dev_err(dev, "Unable to get register resource\n");
595 return -ENXIO;
596 }
597
598 if (!request_mem_region(res->start, resource_size(res),
599 "s3c64xx-i2s-v4")) {
600 dev_err(dev, "Unable to request register region\n");
601 return -EBUSY;
602 }
603
604 base = res->start;
605 }
606
548 i2s->regs = ioremap(base, 0x100); 607 i2s->regs = ioremap(base, 0x100);
549 if (i2s->regs == NULL) { 608 if (i2s->regs == NULL) {
550 dev_err(dev, "cannot ioremap registers\n"); 609 dev_err(dev, "cannot ioremap registers\n");
@@ -560,12 +619,16 @@ int s3c_i2sv2_probe(struct platform_device *pdev,
560 619
561 clk_enable(i2s->iis_pclk); 620 clk_enable(i2s->iis_pclk);
562 621
622 /* Mark ourselves as in TXRX mode so we can run through our cleanup
623 * process without warnings. */
624 iismod = readl(i2s->regs + S3C2412_IISMOD);
625 iismod |= S3C2412_IISMOD_MODE_TXRX;
626 writel(iismod, i2s->regs + S3C2412_IISMOD);
563 s3c2412_snd_txctrl(i2s, 0); 627 s3c2412_snd_txctrl(i2s, 0);
564 s3c2412_snd_rxctrl(i2s, 0); 628 s3c2412_snd_rxctrl(i2s, 0);
565 629
566 return 0; 630 return 0;
567} 631}
568
569EXPORT_SYMBOL_GPL(s3c_i2sv2_probe); 632EXPORT_SYMBOL_GPL(s3c_i2sv2_probe);
570 633
571#ifdef CONFIG_PM 634#ifdef CONFIG_PM
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/s3c24xx/s3c2412-i2s.c
index b7e0b3f0bfc8..168a088ba761 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.c
+++ b/sound/soc/s3c24xx/s3c2412-i2s.c
@@ -120,7 +120,7 @@ static int s3c2412_i2s_probe(struct platform_device *pdev,
120 120
121 s3c2412_i2s.iis_cclk = clk_get(&pdev->dev, "i2sclk"); 121 s3c2412_i2s.iis_cclk = clk_get(&pdev->dev, "i2sclk");
122 if (s3c2412_i2s.iis_cclk == NULL) { 122 if (s3c2412_i2s.iis_cclk == NULL) {
123 pr_debug("failed to get i2sclk clock\n"); 123 pr_err("failed to get i2sclk clock\n");
124 iounmap(s3c2412_i2s.regs); 124 iounmap(s3c2412_i2s.regs);
125 return -ENODEV; 125 return -ENODEV;
126 } 126 }
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
index 33c5de7e255f..3c06c401d0fb 100644
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ b/sound/soc/s3c24xx/s3c64xx-i2s.c
@@ -108,48 +108,19 @@ static int s3c64xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
108 return 0; 108 return 0;
109} 109}
110 110
111 111struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai)
112unsigned long s3c64xx_i2s_get_clockrate(struct snd_soc_dai *dai)
113{ 112{
114 struct s3c_i2sv2_info *i2s = to_info(dai); 113 struct s3c_i2sv2_info *i2s = to_info(dai);
115 114
116 return clk_get_rate(i2s->iis_cclk); 115 return i2s->iis_cclk;
117} 116}
118EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clockrate); 117EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock);
119 118
120static int s3c64xx_i2s_probe(struct platform_device *pdev, 119static int s3c64xx_i2s_probe(struct platform_device *pdev,
121 struct snd_soc_dai *dai) 120 struct snd_soc_dai *dai)
122{ 121{
123 struct device *dev = &pdev->dev;
124 struct s3c_i2sv2_info *i2s;
125 int ret;
126
127 dev_dbg(dev, "%s: probing dai %d\n", __func__, pdev->id);
128
129 if (pdev->id < 0 || pdev->id > ARRAY_SIZE(s3c64xx_i2s)) {
130 dev_err(dev, "id %d out of range\n", pdev->id);
131 return -EINVAL;
132 }
133
134 i2s = &s3c64xx_i2s[pdev->id];
135
136 ret = s3c_i2sv2_probe(pdev, dai, i2s,
137 pdev->id ? S3C64XX_PA_IIS1 : S3C64XX_PA_IIS0);
138 if (ret)
139 return ret;
140
141 i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
142 i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
143
144 i2s->iis_cclk = clk_get(dev, "audio-bus");
145 if (IS_ERR(i2s->iis_cclk)) {
146 dev_err(dev, "failed to get audio-bus");
147 iounmap(i2s->regs);
148 return -ENODEV;
149 }
150
151 /* configure GPIO for i2s port */ 122 /* configure GPIO for i2s port */
152 switch (pdev->id) { 123 switch (dai->id) {
153 case 0: 124 case 0:
154 s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_I2S0_CLK); 125 s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_I2S0_CLK);
155 s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_I2S0_CDCLK); 126 s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_I2S0_CDCLK);
@@ -175,41 +146,122 @@ static int s3c64xx_i2s_probe(struct platform_device *pdev,
175 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) 146 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
176 147
177#define S3C64XX_I2S_FMTS \ 148#define S3C64XX_I2S_FMTS \
178 (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE) 149 (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
150 SNDRV_PCM_FMTBIT_S24_LE)
179 151
180static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops = { 152static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops = {
181 .set_sysclk = s3c64xx_i2s_set_sysclk, 153 .set_sysclk = s3c64xx_i2s_set_sysclk,
182}; 154};
183 155
184struct snd_soc_dai s3c64xx_i2s_dai = { 156struct snd_soc_dai s3c64xx_i2s_dai[] = {
185 .name = "s3c64xx-i2s", 157 {
186 .id = 0, 158 .name = "s3c64xx-i2s",
187 .probe = s3c64xx_i2s_probe, 159 .id = 0,
188 .playback = { 160 .probe = s3c64xx_i2s_probe,
189 .channels_min = 2, 161 .playback = {
190 .channels_max = 2, 162 .channels_min = 2,
191 .rates = S3C64XX_I2S_RATES, 163 .channels_max = 2,
192 .formats = S3C64XX_I2S_FMTS, 164 .rates = S3C64XX_I2S_RATES,
165 .formats = S3C64XX_I2S_FMTS,
166 },
167 .capture = {
168 .channels_min = 2,
169 .channels_max = 2,
170 .rates = S3C64XX_I2S_RATES,
171 .formats = S3C64XX_I2S_FMTS,
172 },
173 .ops = &s3c64xx_i2s_dai_ops,
174 .symmetric_rates = 1,
193 }, 175 },
194 .capture = { 176 {
195 .channels_min = 2, 177 .name = "s3c64xx-i2s",
196 .channels_max = 2, 178 .id = 1,
197 .rates = S3C64XX_I2S_RATES, 179 .probe = s3c64xx_i2s_probe,
198 .formats = S3C64XX_I2S_FMTS, 180 .playback = {
181 .channels_min = 2,
182 .channels_max = 2,
183 .rates = S3C64XX_I2S_RATES,
184 .formats = S3C64XX_I2S_FMTS,
185 },
186 .capture = {
187 .channels_min = 2,
188 .channels_max = 2,
189 .rates = S3C64XX_I2S_RATES,
190 .formats = S3C64XX_I2S_FMTS,
191 },
192 .ops = &s3c64xx_i2s_dai_ops,
193 .symmetric_rates = 1,
199 }, 194 },
200 .ops = &s3c64xx_i2s_dai_ops,
201}; 195};
202EXPORT_SYMBOL_GPL(s3c64xx_i2s_dai); 196EXPORT_SYMBOL_GPL(s3c64xx_i2s_dai);
203 197
198static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
199{
200 struct s3c_i2sv2_info *i2s;
201 struct snd_soc_dai *dai;
202 int ret;
203
204 if (pdev->id >= ARRAY_SIZE(s3c64xx_i2s)) {
205 dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
206 return -EINVAL;
207 }
208
209 i2s = &s3c64xx_i2s[pdev->id];
210 dai = &s3c64xx_i2s_dai[pdev->id];
211 dai->dev = &pdev->dev;
212
213 i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
214 i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
215
216 i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
217 if (IS_ERR(i2s->iis_cclk)) {
218 dev_err(&pdev->dev, "failed to get audio-bus\n");
219 ret = PTR_ERR(i2s->iis_cclk);
220 goto err;
221 }
222
223 ret = s3c_i2sv2_probe(pdev, dai, i2s, 0);
224 if (ret)
225 goto err_clk;
226
227 ret = s3c_i2sv2_register_dai(dai);
228 if (ret != 0)
229 goto err_i2sv2;
230
231 return 0;
232
233err_i2sv2:
234 /* Not implemented for I2Sv2 core yet */
235err_clk:
236 clk_put(i2s->iis_cclk);
237err:
238 return ret;
239}
240
241static __devexit int s3c64xx_iis_dev_remove(struct platform_device *pdev)
242{
243 dev_err(&pdev->dev, "Device removal not yet supported\n");
244 return 0;
245}
246
247static struct platform_driver s3c64xx_iis_driver = {
248 .probe = s3c64xx_iis_dev_probe,
249 .remove = s3c64xx_iis_dev_remove,
250 .driver = {
251 .name = "s3c64xx-iis",
252 .owner = THIS_MODULE,
253 },
254};
255
204static int __init s3c64xx_i2s_init(void) 256static int __init s3c64xx_i2s_init(void)
205{ 257{
206 return s3c_i2sv2_register_dai(&s3c64xx_i2s_dai); 258 return platform_driver_register(&s3c64xx_iis_driver);
207} 259}
208module_init(s3c64xx_i2s_init); 260module_init(s3c64xx_i2s_init);
209 261
210static void __exit s3c64xx_i2s_exit(void) 262static void __exit s3c64xx_i2s_exit(void)
211{ 263{
212 snd_soc_unregister_dai(&s3c64xx_i2s_dai); 264 platform_driver_unregister(&s3c64xx_iis_driver);
213} 265}
214module_exit(s3c64xx_i2s_exit); 266module_exit(s3c64xx_i2s_exit);
215 267
@@ -217,6 +269,3 @@ module_exit(s3c64xx_i2s_exit);
217MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 269MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
218MODULE_DESCRIPTION("S3C64XX I2S SoC Interface"); 270MODULE_DESCRIPTION("S3C64XX I2S SoC Interface");
219MODULE_LICENSE("GPL"); 271MODULE_LICENSE("GPL");
220
221
222
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h
index b7ffe3c38b66..02148cee2613 100644
--- a/sound/soc/s3c24xx/s3c64xx-i2s.h
+++ b/sound/soc/s3c24xx/s3c64xx-i2s.h
@@ -15,6 +15,8 @@
15#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H 15#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H
16#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__ 16#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__
17 17
18struct clk;
19
18#include "s3c-i2s-v2.h" 20#include "s3c-i2s-v2.h"
19 21
20#define S3C64XX_DIV_BCLK S3C_I2SV2_DIV_BCLK 22#define S3C64XX_DIV_BCLK S3C_I2SV2_DIV_BCLK
@@ -24,8 +26,8 @@
24#define S3C64XX_CLKSRC_PCLK (0) 26#define S3C64XX_CLKSRC_PCLK (0)
25#define S3C64XX_CLKSRC_MUX (1) 27#define S3C64XX_CLKSRC_MUX (1)
26 28
27extern struct snd_soc_dai s3c64xx_i2s_dai; 29extern struct snd_soc_dai s3c64xx_i2s_dai[];
28 30
29extern unsigned long s3c64xx_i2s_get_clockrate(struct snd_soc_dai *cpu_dai); 31extern struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai);
30 32
31#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */ 33#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */
diff --git a/sound/soc/s6000/Kconfig b/sound/soc/s6000/Kconfig
new file mode 100644
index 000000000000..c74eb3d4a47c
--- /dev/null
+++ b/sound/soc/s6000/Kconfig
@@ -0,0 +1,19 @@
1config SND_S6000_SOC
2 tristate "SoC Audio for the Stretch s6000 family"
3 depends on XTENSA_VARIANT_S6000
4 help
5 Say Y or M if you want to add support for codecs attached to
6 s6000 family chips. You will also need to select the platform
7 to support below.
8
9config SND_S6000_SOC_I2S
10 tristate
11
12config SND_S6000_SOC_S6IPCAM
13 tristate "SoC Audio support for Stretch 6105 IP Camera"
14 depends on SND_S6000_SOC && XTENSA_PLATFORM_S6105
15 select SND_S6000_SOC_I2S
16 select SND_SOC_TLV320AIC3X
17 help
18 Say Y if you want to add support for SoC audio on the
19 Stretch s6105 IP Camera Reference Design.
diff --git a/sound/soc/s6000/Makefile b/sound/soc/s6000/Makefile
new file mode 100644
index 000000000000..7a613612e010
--- /dev/null
+++ b/sound/soc/s6000/Makefile
@@ -0,0 +1,11 @@
1# s6000 Platform Support
2snd-soc-s6000-objs := s6000-pcm.o
3snd-soc-s6000-i2s-objs := s6000-i2s.o
4
5obj-$(CONFIG_SND_S6000_SOC) += snd-soc-s6000.o
6obj-$(CONFIG_SND_S6000_SOC_I2S) += snd-soc-s6000-i2s.o
7
8# s6105 Machine Support
9snd-soc-s6ipcam-objs := s6105-ipcam.o
10
11obj-$(CONFIG_SND_S6000_SOC_S6IPCAM) += snd-soc-s6ipcam.o
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c
new file mode 100644
index 000000000000..c5cda187ecab
--- /dev/null
+++ b/sound/soc/s6000/s6000-i2s.c
@@ -0,0 +1,629 @@
1/*
2 * ALSA SoC I2S Audio Layer for the Stretch S6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/delay.h>
16#include <linux/clk.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/pcm_params.h>
23#include <sound/initval.h>
24#include <sound/soc.h>
25
26#include "s6000-i2s.h"
27#include "s6000-pcm.h"
28
29struct s6000_i2s_dev {
30 dma_addr_t sifbase;
31 u8 __iomem *scbbase;
32 unsigned int wide;
33 unsigned int channel_in;
34 unsigned int channel_out;
35 unsigned int lines_in;
36 unsigned int lines_out;
37 struct s6000_pcm_dma_params dma_params;
38};
39
40#define S6_I2S_INTERRUPT_STATUS 0x00
41#define S6_I2S_INT_OVERRUN 1
42#define S6_I2S_INT_UNDERRUN 2
43#define S6_I2S_INT_ALIGNMENT 4
44#define S6_I2S_INTERRUPT_ENABLE 0x04
45#define S6_I2S_INTERRUPT_RAW 0x08
46#define S6_I2S_INTERRUPT_CLEAR 0x0C
47#define S6_I2S_INTERRUPT_SET 0x10
48#define S6_I2S_MODE 0x20
49#define S6_I2S_DUAL 0
50#define S6_I2S_WIDE 1
51#define S6_I2S_TX_DEFAULT 0x24
52#define S6_I2S_DATA_CFG(c) (0x40 + 0x10 * (c))
53#define S6_I2S_IN 0
54#define S6_I2S_OUT 1
55#define S6_I2S_UNUSED 2
56#define S6_I2S_INTERFACE_CFG(c) (0x44 + 0x10 * (c))
57#define S6_I2S_DIV_MASK 0x001fff
58#define S6_I2S_16BIT 0x000000
59#define S6_I2S_20BIT 0x002000
60#define S6_I2S_24BIT 0x004000
61#define S6_I2S_32BIT 0x006000
62#define S6_I2S_BITS_MASK 0x006000
63#define S6_I2S_MEM_16BIT 0x000000
64#define S6_I2S_MEM_32BIT 0x008000
65#define S6_I2S_MEM_MASK 0x008000
66#define S6_I2S_CHANNELS_SHIFT 16
67#define S6_I2S_CHANNELS_MASK 0x030000
68#define S6_I2S_SCK_IN 0x000000
69#define S6_I2S_SCK_OUT 0x040000
70#define S6_I2S_SCK_DIR 0x040000
71#define S6_I2S_WS_IN 0x000000
72#define S6_I2S_WS_OUT 0x080000
73#define S6_I2S_WS_DIR 0x080000
74#define S6_I2S_LEFT_FIRST 0x000000
75#define S6_I2S_RIGHT_FIRST 0x100000
76#define S6_I2S_FIRST 0x100000
77#define S6_I2S_CUR_SCK 0x200000
78#define S6_I2S_CUR_WS 0x400000
79#define S6_I2S_ENABLE(c) (0x48 + 0x10 * (c))
80#define S6_I2S_DISABLE_IF 0x02
81#define S6_I2S_ENABLE_IF 0x03
82#define S6_I2S_IS_BUSY 0x04
83#define S6_I2S_DMA_ACTIVE 0x08
84#define S6_I2S_IS_ENABLED 0x10
85
86#define S6_I2S_NUM_LINES 4
87
88#define S6_I2S_SIF_PORT0 0x0000000
89#define S6_I2S_SIF_PORT1 0x0000080 /* docs say 0x0000010 */
90
91static inline void s6_i2s_write_reg(struct s6000_i2s_dev *dev, int reg, u32 val)
92{
93 writel(val, dev->scbbase + reg);
94}
95
96static inline u32 s6_i2s_read_reg(struct s6000_i2s_dev *dev, int reg)
97{
98 return readl(dev->scbbase + reg);
99}
100
101static inline void s6_i2s_mod_reg(struct s6000_i2s_dev *dev, int reg,
102 u32 mask, u32 val)
103{
104 val ^= s6_i2s_read_reg(dev, reg) & ~mask;
105 s6_i2s_write_reg(dev, reg, val);
106}
107
108static void s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel)
109{
110 int i, j, cur, prev;
111
112 /*
113 * Wait for WCLK to toggle 5 times before enabling the channel
114 * s6000 Family Datasheet 3.6.4:
115 * "At least two cycles of WS must occur between commands
116 * to disable or enable the interface"
117 */
118 j = 0;
119 prev = ~S6_I2S_CUR_WS;
120 for (i = 1000000; --i && j < 6; ) {
121 cur = s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(channel))
122 & S6_I2S_CUR_WS;
123 if (prev != cur) {
124 prev = cur;
125 j++;
126 }
127 }
128 if (j < 6)
129 printk(KERN_WARNING "s6000-i2s: timeout waiting for WCLK\n");
130
131 s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_ENABLE_IF);
132}
133
134static void s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel)
135{
136 s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_DISABLE_IF);
137}
138
139static void s6000_i2s_start(struct snd_pcm_substream *substream)
140{
141 struct snd_soc_pcm_runtime *rtd = substream->private_data;
142 struct s6000_i2s_dev *dev = rtd->dai->cpu_dai->private_data;
143 int channel;
144
145 channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
146 dev->channel_out : dev->channel_in;
147
148 s6000_i2s_start_channel(dev, channel);
149}
150
151static void s6000_i2s_stop(struct snd_pcm_substream *substream)
152{
153 struct snd_soc_pcm_runtime *rtd = substream->private_data;
154 struct s6000_i2s_dev *dev = rtd->dai->cpu_dai->private_data;
155 int channel;
156
157 channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
158 dev->channel_out : dev->channel_in;
159
160 s6000_i2s_stop_channel(dev, channel);
161}
162
163static int s6000_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
164 int after)
165{
166 switch (cmd) {
167 case SNDRV_PCM_TRIGGER_START:
168 case SNDRV_PCM_TRIGGER_RESUME:
169 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
170 if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ^ !after)
171 s6000_i2s_start(substream);
172 break;
173 case SNDRV_PCM_TRIGGER_STOP:
174 case SNDRV_PCM_TRIGGER_SUSPEND:
175 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
176 if (!after)
177 s6000_i2s_stop(substream);
178 }
179 return 0;
180}
181
182static unsigned int s6000_i2s_int_sources(struct s6000_i2s_dev *dev)
183{
184 unsigned int pending;
185 pending = s6_i2s_read_reg(dev, S6_I2S_INTERRUPT_RAW);
186 pending &= S6_I2S_INT_ALIGNMENT |
187 S6_I2S_INT_UNDERRUN |
188 S6_I2S_INT_OVERRUN;
189 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR, pending);
190
191 return pending;
192}
193
194static unsigned int s6000_i2s_check_xrun(struct snd_soc_dai *cpu_dai)
195{
196 struct s6000_i2s_dev *dev = cpu_dai->private_data;
197 unsigned int errors;
198 unsigned int ret;
199
200 errors = s6000_i2s_int_sources(dev);
201 if (likely(!errors))
202 return 0;
203
204 ret = 0;
205 if (errors & S6_I2S_INT_ALIGNMENT)
206 printk(KERN_ERR "s6000-i2s: WCLK misaligned\n");
207 if (errors & S6_I2S_INT_UNDERRUN)
208 ret |= 1 << SNDRV_PCM_STREAM_PLAYBACK;
209 if (errors & S6_I2S_INT_OVERRUN)
210 ret |= 1 << SNDRV_PCM_STREAM_CAPTURE;
211 return ret;
212}
213
214static void s6000_i2s_wait_disabled(struct s6000_i2s_dev *dev)
215{
216 int channel;
217 int n = 50;
218 for (channel = 0; channel < 2; channel++) {
219 while (--n >= 0) {
220 int v = s6_i2s_read_reg(dev, S6_I2S_ENABLE(channel));
221 if ((v & S6_I2S_IS_ENABLED)
222 || !(v & (S6_I2S_DMA_ACTIVE | S6_I2S_IS_BUSY)))
223 break;
224 udelay(20);
225 }
226 }
227 if (n < 0)
228 printk(KERN_WARNING "s6000-i2s: timeout disabling interfaces");
229}
230
231static int s6000_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
232 unsigned int fmt)
233{
234 struct s6000_i2s_dev *dev = cpu_dai->private_data;
235 u32 w;
236
237 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
238 case SND_SOC_DAIFMT_CBM_CFM:
239 w = S6_I2S_SCK_IN | S6_I2S_WS_IN;
240 break;
241 case SND_SOC_DAIFMT_CBS_CFM:
242 w = S6_I2S_SCK_OUT | S6_I2S_WS_IN;
243 break;
244 case SND_SOC_DAIFMT_CBM_CFS:
245 w = S6_I2S_SCK_IN | S6_I2S_WS_OUT;
246 break;
247 case SND_SOC_DAIFMT_CBS_CFS:
248 w = S6_I2S_SCK_OUT | S6_I2S_WS_OUT;
249 break;
250 default:
251 return -EINVAL;
252 }
253
254 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
255 case SND_SOC_DAIFMT_NB_NF:
256 w |= S6_I2S_LEFT_FIRST;
257 break;
258 case SND_SOC_DAIFMT_NB_IF:
259 w |= S6_I2S_RIGHT_FIRST;
260 break;
261 default:
262 return -EINVAL;
263 }
264
265 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(0),
266 S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
267 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(1),
268 S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
269
270 return 0;
271}
272
273static int s6000_i2s_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
274{
275 struct s6000_i2s_dev *dev = dai->private_data;
276
277 if (!div || (div & 1) || div > (S6_I2S_DIV_MASK + 1) * 2)
278 return -EINVAL;
279
280 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(div_id),
281 S6_I2S_DIV_MASK, div / 2 - 1);
282 return 0;
283}
284
285static int s6000_i2s_hw_params(struct snd_pcm_substream *substream,
286 struct snd_pcm_hw_params *params,
287 struct snd_soc_dai *dai)
288{
289 struct s6000_i2s_dev *dev = dai->private_data;
290 int interf;
291 u32 w = 0;
292
293 if (dev->wide)
294 interf = 0;
295 else {
296 w |= (((params_channels(params) - 2) / 2)
297 << S6_I2S_CHANNELS_SHIFT) & S6_I2S_CHANNELS_MASK;
298 interf = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
299 ? dev->channel_out : dev->channel_in;
300 }
301
302 switch (params_format(params)) {
303 case SNDRV_PCM_FORMAT_S16_LE:
304 w |= S6_I2S_16BIT | S6_I2S_MEM_16BIT;
305 break;
306 case SNDRV_PCM_FORMAT_S32_LE:
307 w |= S6_I2S_32BIT | S6_I2S_MEM_32BIT;
308 break;
309 default:
310 printk(KERN_WARNING "s6000-i2s: unsupported PCM format %x\n",
311 params_format(params));
312 return -EINVAL;
313 }
314
315 if (s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(interf))
316 & S6_I2S_IS_ENABLED) {
317 printk(KERN_ERR "s6000-i2s: interface already enabled\n");
318 return -EBUSY;
319 }
320
321 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(interf),
322 S6_I2S_CHANNELS_MASK|S6_I2S_MEM_MASK|S6_I2S_BITS_MASK,
323 w);
324
325 return 0;
326}
327
/*
 * DAI probe: copy the board configuration from platform data, put the
 * controller into "wide" (one clock pair, many data lines) or "dual"
 * (two independent interfaces) mode, route the data lines, and derive
 * the DMA parameters consumed by s6000-pcm.
 */
static int s6000_i2s_dai_probe(struct platform_device *pdev,
			       struct snd_soc_dai *dai)
{
	struct s6000_i2s_dev *dev = dai->private_data;
	struct s6000_snd_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	dev->wide = pdata->wide;
	dev->channel_in = pdata->channel_in;
	dev->channel_out = pdata->channel_out;
	dev->lines_in = pdata->lines_in;
	dev->lines_out = pdata->lines_out;

	s6_i2s_write_reg(dev, S6_I2S_MODE,
			 dev->wide ? S6_I2S_WIDE : S6_I2S_DUAL);

	if (dev->wide) {
		int i;

		/* all data lines share the controller's line pool */
		if (dev->lines_in + dev->lines_out > S6_I2S_NUM_LINES)
			return -EINVAL;

		/* fixed channel assignment in wide mode */
		dev->channel_in = 0;
		dev->channel_out = 1;
		/* each line carries exactly one stereo pair, so the
		 * channel count is fixed */
		dai->capture.channels_min = 2 * dev->lines_in;
		dai->capture.channels_max = dai->capture.channels_min;
		dai->playback.channels_min = 2 * dev->lines_out;
		dai->playback.channels_max = dai->playback.channels_min;

		/* outputs from line 0 upwards, inputs from the top line
		 * downwards, unused lines in between */
		for (i = 0; i < dev->lines_out; i++)
			s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_OUT);

		for (; i < S6_I2S_NUM_LINES - dev->lines_in; i++)
			s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i),
					 S6_I2S_UNUSED);

		for (; i < S6_I2S_NUM_LINES; i++)
			s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_IN);
	} else {
		unsigned int cfg[2] = {S6_I2S_UNUSED, S6_I2S_UNUSED};

		/* dual mode allows at most one line per direction */
		if (dev->lines_in > 1 || dev->lines_out > 1)
			return -EINVAL;

		dai->capture.channels_min = 2 * dev->lines_in;
		dai->capture.channels_max = 8 * dev->lines_in;
		dai->playback.channels_min = 2 * dev->lines_out;
		dai->playback.channels_max = 8 * dev->lines_out;

		if (dev->lines_in)
			cfg[dev->channel_in] = S6_I2S_IN;
		if (dev->lines_out)
			cfg[dev->channel_out] = S6_I2S_OUT;

		s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(0), cfg[0]);
		s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(1), cfg[1]);
	}

	/* probe() put the first DMA resource into dma_in; if there is no
	 * capture direction that resource actually serves playback */
	if (dev->lines_out) {
		if (dev->lines_in) {
			if (!dev->dma_params.dma_out)
				return -ENODEV;
		} else {
			dev->dma_params.dma_out = dev->dma_params.dma_in;
			dev->dma_params.dma_in = 0;
		}
	}
	dev->dma_params.sif_in = dev->sifbase + (dev->channel_in ?
					S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
	dev->dma_params.sif_out = dev->sifbase + (dev->channel_out ?
					S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
	/* wide mode shares a single clock, which forces equal rates */
	dev->dma_params.same_rate = pdata->same_rate | pdata->wide;
	return 0;
}
404
/* continuous rate range; rate_max in the DAI definition is the real cap */
#define S6000_I2S_RATES (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 | \
			 SNDRV_PCM_RATE_8000_192000)
#define S6000_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)

/* DAI callbacks implemented by this driver */
static struct snd_soc_dai_ops s6000_i2s_dai_ops = {
	.set_fmt = s6000_i2s_set_dai_fmt,
	.set_clkdiv = s6000_i2s_set_clkdiv,
	.hw_params = s6000_i2s_hw_params,
};
414
415struct snd_soc_dai s6000_i2s_dai = {
416 .name = "s6000-i2s",
417 .id = 0,
418 .probe = s6000_i2s_dai_probe,
419 .playback = {
420 .channels_min = 2,
421 .channels_max = 8,
422 .formats = S6000_I2S_FORMATS,
423 .rates = S6000_I2S_RATES,
424 .rate_min = 0,
425 .rate_max = 1562500,
426 },
427 .capture = {
428 .channels_min = 2,
429 .channels_max = 8,
430 .formats = S6000_I2S_FORMATS,
431 .rates = S6000_I2S_RATES,
432 .rate_min = 0,
433 .rate_max = 1562500,
434 },
435 .ops = &s6000_i2s_dai_ops,
436}
437EXPORT_SYMBOL_GPL(s6000_i2s_dai);
438
/*
 * Platform probe: claim and map the SCB register window (MEM 0), claim
 * the SIF data window (MEM 1) and the DMA controller window(s)
 * (DMA 0 and optional DMA 1), quiesce the hardware, then register the
 * CPU DAI.  Errors unwind via the goto chain in reverse acquisition
 * order.
 */
static int __devinit s6000_i2s_probe(struct platform_device *pdev)
{
	struct s6000_i2s_dev *dev;
	struct resource *scbmem, *sifmem, *region, *dma1, *dma2;
	u8 __iomem *mmio;
	int ret;

	/* MEM 0: SCB (control/status) registers */
	scbmem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!scbmem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		ret = -ENODEV;
		goto err_release_none;
	}

	region = request_mem_region(scbmem->start,
				    scbmem->end - scbmem->start + 1,
				    pdev->name);
	if (!region) {
		dev_err(&pdev->dev, "I2S SCB region already claimed\n");
		ret = -EBUSY;
		goto err_release_none;
	}

	mmio = ioremap(scbmem->start, scbmem->end - scbmem->start + 1);
	if (!mmio) {
		dev_err(&pdev->dev, "can't ioremap SCB region\n");
		ret = -ENOMEM;
		goto err_release_scb;
	}

	/* MEM 1: SIF data ports; only its bus address is used, no ioremap */
	sifmem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!sifmem) {
		dev_err(&pdev->dev, "no second mem resource?\n");
		ret = -ENODEV;
		goto err_release_map;
	}

	region = request_mem_region(sifmem->start,
				    sifmem->end - sifmem->start + 1,
				    pdev->name);
	if (!region) {
		dev_err(&pdev->dev, "I2S SIF region already claimed\n");
		ret = -EBUSY;
		goto err_release_map;
	}

	/* DMA 0 is mandatory, DMA 1 optional (single-direction setups) */
	dma1 = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dma1) {
		dev_err(&pdev->dev, "no dma resource?\n");
		ret = -ENODEV;
		goto err_release_sif;
	}

	region = request_mem_region(dma1->start, dma1->end - dma1->start + 1,
				    pdev->name);
	if (!region) {
		dev_err(&pdev->dev, "I2S DMA region already claimed\n");
		ret = -EBUSY;
		goto err_release_sif;
	}

	dma2 = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (dma2) {
		region = request_mem_region(dma2->start,
					    dma2->end - dma2->start + 1,
					    pdev->name);
		if (!region) {
			dev_err(&pdev->dev,
				"I2S DMA region already claimed\n");
			ret = -EBUSY;
			goto err_release_dma1;
		}
	}

	dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_dma2;
	}

	s6000_i2s_dai.dev = &pdev->dev;
	s6000_i2s_dai.private_data = dev;
	s6000_i2s_dai.dma_data = &dev->dma_params;

	dev->sifbase = sifmem->start;
	dev->scbbase = mmio;

	/* mask and clear any stale interrupt state */
	s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
	s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR,
			 S6_I2S_INT_ALIGNMENT |
			 S6_I2S_INT_UNDERRUN |
			 S6_I2S_INT_OVERRUN);

	s6000_i2s_stop_channel(dev, 0);
	s6000_i2s_stop_channel(dev, 1);
	s6000_i2s_wait_disabled(dev);

	dev->dma_params.check_xrun = s6000_i2s_check_xrun;
	dev->dma_params.trigger = s6000_i2s_trigger;
	/* dai_probe() may later move dma_in to dma_out for playback-only */
	dev->dma_params.dma_in = dma1->start;
	dev->dma_params.dma_out = dma2 ? dma2->start : 0;
	dev->dma_params.irq = platform_get_irq(pdev, 0);
	if (dev->dma_params.irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		ret = -ENODEV;
		goto err_release_dev;
	}

	s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE,
			 S6_I2S_INT_ALIGNMENT |
			 S6_I2S_INT_UNDERRUN |
			 S6_I2S_INT_OVERRUN);

	ret = snd_soc_register_dai(&s6000_i2s_dai);
	if (ret)
		goto err_release_dev;

	return 0;

err_release_dev:
	kfree(dev);
err_release_dma2:
	if (dma2)
		release_mem_region(dma2->start, dma2->end - dma2->start + 1);
err_release_dma1:
	release_mem_region(dma1->start, dma1->end - dma1->start + 1);
err_release_sif:
	release_mem_region(sifmem->start, (sifmem->end - sifmem->start) + 1);
err_release_map:
	iounmap(mmio);
err_release_scb:
	release_mem_region(scbmem->start, (scbmem->end - scbmem->start) + 1);
err_release_none:
	return ret;
}
574
575static void __devexit s6000_i2s_remove(struct platform_device *pdev)
576{
577 struct s6000_i2s_dev *dev = s6000_i2s_dai.private_data;
578 struct resource *region;
579 void __iomem *mmio = dev->scbbase;
580
581 snd_soc_unregister_dai(&s6000_i2s_dai);
582
583 s6000_i2s_stop_channel(dev, 0);
584 s6000_i2s_stop_channel(dev, 1);
585
586 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
587 s6000_i2s_dai.private_data = 0;
588 kfree(dev);
589
590 region = platform_get_resource(pdev, IORESOURCE_DMA, 0);
591 release_mem_region(region->start, region->end - region->start + 1);
592
593 region = platform_get_resource(pdev, IORESOURCE_DMA, 1);
594 if (region)
595 release_mem_region(region->start,
596 region->end - region->start + 1);
597
598 region = platform_get_resource(pdev, IORESOURCE_MEM, 0);
599 release_mem_region(region->start, (region->end - region->start) + 1);
600
601 iounmap(mmio);
602 region = platform_get_resource(pdev, IORESOURCE_IO, 0);
603 release_mem_region(region->start, (region->end - region->start) + 1);
604}
605
/* platform driver glue; binds against devices named "s6000-i2s" */
static struct platform_driver s6000_i2s_driver = {
	.probe  = s6000_i2s_probe,
	.remove = __devexit_p(s6000_i2s_remove),
	.driver = {
		.name   = "s6000-i2s",
		.owner  = THIS_MODULE,
	},
};
614
/* module entry: register the platform driver */
static int __init s6000_i2s_init(void)
{
	return platform_driver_register(&s6000_i2s_driver);
}
module_init(s6000_i2s_init);

/* module exit: unregister the platform driver */
static void __exit s6000_i2s_exit(void)
{
	platform_driver_unregister(&s6000_i2s_driver);
}
module_exit(s6000_i2s_exit);
626
627MODULE_AUTHOR("Daniel Gloeckner");
628MODULE_DESCRIPTION("Stretch s6000 family I2S SoC Interface");
629MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6000-i2s.h b/sound/soc/s6000/s6000-i2s.h
new file mode 100644
index 000000000000..2375fdfe6dba
--- /dev/null
+++ b/sound/soc/s6000/s6000-i2s.h
@@ -0,0 +1,25 @@
1/*
2 * ALSA SoC I2S Audio Layer for the Stretch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
#ifndef _S6000_I2S_H
#define _S6000_I2S_H

/* CPU DAI exported by s6000-i2s.c */
extern struct snd_soc_dai s6000_i2s_dai;

/* Board-level configuration passed via platform_device platform_data. */
struct s6000_snd_platform_data {
	int lines_in;		/* number of capture data lines */
	int lines_out;		/* number of playback data lines */
	int channel_in;		/* interface (0/1) used for capture (dual mode) */
	int channel_out;	/* interface (0/1) used for playback (dual mode) */
	int wide;		/* nonzero: wide mode (shared clock pair) */
	int same_rate;		/* nonzero: force equal playback/capture rates */
};
#endif
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
new file mode 100644
index 000000000000..83b8028e209d
--- /dev/null
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -0,0 +1,497 @@
1/*
2 * ALSA PCM interface for the Stetch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/dma-mapping.h>
17#include <linux/interrupt.h>
18
19#include <sound/core.h>
20#include <sound/pcm.h>
21#include <sound/pcm_params.h>
22#include <sound/soc.h>
23
24#include <asm/dma.h>
25#include <variant/dmac.h>
26
27#include "s6000-pcm.h"
28
/* default and maximum DMA buffer preallocation */
#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
#define S6_PCM_PREALLOCATE_MAX (2048 * 1024)

/* capabilities advertised to ALSA; the DAI narrows rates/channels */
static struct snd_pcm_hardware s6000_pcm_hardware = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX),
	.formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE),
	.rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 | \
		  SNDRV_PCM_RATE_8000_192000),
	.rate_min = 0,
	.rate_max = 1562500,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = 0x7ffffff0,
	.period_bytes_min = 16,		/* DMA works in 16-byte units */
	.period_bytes_max = 0xfffff0,
	.periods_min = 2,
	.periods_max = 1024, /* no limit */
	.fifo_size = 0,
};

/* per-substream state */
struct s6000_runtime_data {
	spinlock_t lock;
	int period;		/* current DMA period */
};
55
56static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream)
57{
58 struct snd_pcm_runtime *runtime = substream->runtime;
59 struct s6000_runtime_data *prtd = runtime->private_data;
60 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
61 struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
62 int channel;
63 unsigned int period_size;
64 unsigned int dma_offset;
65 dma_addr_t dma_pos;
66 dma_addr_t src, dst;
67
68 period_size = snd_pcm_lib_period_bytes(substream);
69 dma_offset = prtd->period * period_size;
70 dma_pos = runtime->dma_addr + dma_offset;
71
72 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
73 src = dma_pos;
74 dst = par->sif_out;
75 channel = par->dma_out;
76 } else {
77 src = par->sif_in;
78 dst = dma_pos;
79 channel = par->dma_in;
80 }
81
82 if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel),
83 DMA_INDEX_CHNL(channel)))
84 return;
85
86 if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) {
87 printk(KERN_ERR "s6000-pcm: fifo full\n");
88 return;
89 }
90
91 BUG_ON(period_size & 15);
92 s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
93 src, dst, period_size);
94
95 prtd->period++;
96 if (unlikely(prtd->period >= runtime->periods))
97 prtd->period = 0;
98}
99
/*
 * Shared interrupt handler for both DMA channels.  Stops streams that
 * the DAI reports as xrun, acknowledges period-completion interrupts
 * (queueing the next period), and logs DMA error conditions.
 */
static irqreturn_t s6000_pcm_irq(int irq, void *data)
{
	struct snd_pcm *pcm = data;
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data;
	struct s6000_runtime_data *prtd;
	unsigned int has_xrun;
	int i, ret = IRQ_NONE;
	/* indexed by stream direction so the loop can handle both */
	u32 channel[2] = {
		[SNDRV_PCM_STREAM_PLAYBACK] = params->dma_out,
		[SNDRV_PCM_STREAM_CAPTURE] = params->dma_in
	};

	/* bitmask of directions the DAI saw under/overrun on */
	has_xrun = params->check_xrun(runtime->dai->cpu_dai);

	for (i = 0; i < ARRAY_SIZE(channel); ++i) {
		struct snd_pcm_substream *substream = pcm->streams[i].substream;
		unsigned int pending;

		if (!channel[i])
			continue;

		if (unlikely(has_xrun & (1 << i)) &&
		    substream->runtime &&
		    snd_pcm_running(substream)) {
			dev_dbg(pcm->dev, "xrun\n");
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
			ret = IRQ_HANDLED;
		}

		/* read-and-clear the channel's interrupt sources */
		pending = s6dmac_int_sources(DMA_MASK_DMAC(channel[i]),
					     DMA_INDEX_CHNL(channel[i]));

		/* bit 0: a queued descriptor completed */
		if (pending & 1) {
			ret = IRQ_HANDLED;
			if (likely(substream->runtime &&
				   snd_pcm_running(substream))) {
				snd_pcm_period_elapsed(substream);
				dev_dbg(pcm->dev, "period elapsed %x %x\n",
				       s6dmac_cur_src(DMA_MASK_DMAC(channel[i]),
						   DMA_INDEX_CHNL(channel[i])),
				       s6dmac_cur_dst(DMA_MASK_DMAC(channel[i]),
						   DMA_INDEX_CHNL(channel[i])));
				prtd = substream->runtime->private_data;
				spin_lock(&prtd->lock);
				s6000_pcm_enqueue_dma(substream);
				spin_unlock(&prtd->lock);
			}
		}

		/* bits 3+: error conditions, log only */
		if (unlikely(pending & ~7)) {
			if (pending & (1 << 3))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Underflow\n",
				       channel[i]);
			if (pending & (1 << 4))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Overflow\n",
				       channel[i]);
			if (pending & 0x1e0)
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Master Error "
				       "(mask %x)\n",
				       channel[i], pending >> 5);

		}
	}

	return ret;
}
170
/*
 * Start a stream: enable the DMA channel for the right direction and
 * prime the engine with the first two periods so one is always queued
 * while the other transfers.
 */
static int s6000_pcm_start(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
	unsigned long flags;
	int srcinc;
	u32 dma;

	spin_lock_irqsave(&prtd->lock, flags);

	/* playback reads incrementing memory addresses; capture writes them */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		srcinc = 1;
		dma = par->dma_out;
	} else {
		srcinc = 0;
		dma = par->dma_in;
	}
	s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
			   1 /* priority 1 (0 is max) */,
			   0 /* peripheral requests w/o xfer length mode */,
			   srcinc /* source address increment */,
			   srcinc^1 /* destination address increment */,
			   0 /* chunksize 0 (skip impossible on this dma) */,
			   0 /* source skip after chunk (impossible) */,
			   0 /* destination skip after chunk (impossible) */,
			   4 /* 16 byte burst size */,
			   -1 /* don't conserve bandwidth */,
			   0 /* low watermark irq descriptor theshold */,
			   0 /* disable hardware timestamps */,
			   1 /* enable channel */);

	s6000_pcm_enqueue_dma(substream);
	s6000_pcm_enqueue_dma(substream);

	spin_unlock_irqrestore(&prtd->lock, flags);

	return 0;
}
210
211static int s6000_pcm_stop(struct snd_pcm_substream *substream)
212{
213 struct s6000_runtime_data *prtd = substream->runtime->private_data;
214 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
215 struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
216 unsigned long flags;
217 u32 channel;
218
219 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
220 channel = par->dma_out;
221 else
222 channel = par->dma_in;
223
224 s6dmac_set_terminal_count(DMA_MASK_DMAC(channel),
225 DMA_INDEX_CHNL(channel), 0);
226
227 spin_lock_irqsave(&prtd->lock, flags);
228
229 s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel));
230
231 spin_unlock_irqrestore(&prtd->lock, flags);
232
233 return 0;
234}
235
236static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
237{
238 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
239 struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
240 int ret;
241
242 ret = par->trigger(substream, cmd, 0);
243 if (ret < 0)
244 return ret;
245
246 switch (cmd) {
247 case SNDRV_PCM_TRIGGER_START:
248 case SNDRV_PCM_TRIGGER_RESUME:
249 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
250 ret = s6000_pcm_start(substream);
251 break;
252 case SNDRV_PCM_TRIGGER_STOP:
253 case SNDRV_PCM_TRIGGER_SUSPEND:
254 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
255 ret = s6000_pcm_stop(substream);
256 break;
257 default:
258 ret = -EINVAL;
259 }
260 if (ret < 0)
261 return ret;
262
263 return par->trigger(substream, cmd, 1);
264}
265
266static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
267{
268 struct s6000_runtime_data *prtd = substream->runtime->private_data;
269
270 prtd->period = 0;
271
272 return 0;
273}
274
275static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
276{
277 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
278 struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
279 struct snd_pcm_runtime *runtime = substream->runtime;
280 struct s6000_runtime_data *prtd = runtime->private_data;
281 unsigned long flags;
282 unsigned int offset;
283 dma_addr_t count;
284
285 spin_lock_irqsave(&prtd->lock, flags);
286
287 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
288 count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out),
289 DMA_INDEX_CHNL(par->dma_out));
290 else
291 count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in),
292 DMA_INDEX_CHNL(par->dma_in));
293
294 count -= runtime->dma_addr;
295
296 spin_unlock_irqrestore(&prtd->lock, flags);
297
298 offset = bytes_to_frames(runtime, count);
299 if (unlikely(offset >= runtime->buffer_size))
300 offset = 0;
301
302 return offset;
303}
304
/*
 * Open: install hardware constraints (16-byte aligned periods and
 * buffers, integral period count, optionally a fixed rate when the
 * sibling stream already pinned one) and allocate per-stream state.
 */
static int s6000_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);

	/* DMA transfers are done in 16-byte units */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	if (par->same_rate) {
		int rate;
		/* NOTE(review): locking around a single int read looks
		 * redundant (original comment asked "needed?"); the rate
		 * may still change before hw_params, which re-checks it */
		spin_lock(&par->lock);
		rate = par->rate;
		spin_unlock(&par->lock);
		if (rate != -1) {
			/* other direction is running: force its rate */
			ret = snd_pcm_hw_constraint_minmax(runtime,
							SNDRV_PCM_HW_PARAM_RATE,
							rate, rate);
			if (ret < 0)
				return ret;
		}
	}

	prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}
352
353static int s6000_pcm_close(struct snd_pcm_substream *substream)
354{
355 struct snd_pcm_runtime *runtime = substream->runtime;
356 struct s6000_runtime_data *prtd = runtime->private_data;
357
358 kfree(prtd);
359
360 return 0;
361}
362
/*
 * hw_params: allocate the DMA buffer and, when rates are coupled,
 * claim the shared rate — failing with -EBUSY if the other direction
 * already runs at a different rate.
 */
static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *hw_params)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
	int ret;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (ret < 0) {
		printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
		return ret;
	}

	if (par->same_rate) {
		spin_lock(&par->lock);
		/* free to (re)set the rate if unset or we are the only user */
		if (par->rate == -1 ||
		    !(par->in_use & ~(1 << substream->stream))) {
			par->rate = params_rate(hw_params);
			par->in_use |= 1 << substream->stream;
		} else if (params_rate(hw_params) != par->rate) {
			/* conflict with the other direction's rate */
			snd_pcm_lib_free_pages(substream);
			par->in_use &= ~(1 << substream->stream);
			ret = -EBUSY;
		}
		spin_unlock(&par->lock);
	}
	return ret;
}
391
392static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
393{
394 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
395 struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data;
396
397 spin_lock(&par->lock);
398 par->in_use &= ~(1 << substream->stream);
399 if (!par->in_use)
400 par->rate = -1;
401 spin_unlock(&par->lock);
402
403 return snd_pcm_lib_free_pages(substream);
404}
405
/* PCM operations table handed to the ASoC core */
static struct snd_pcm_ops s6000_pcm_ops = {
	.open = 	s6000_pcm_open,
	.close = 	s6000_pcm_close,
	.ioctl = 	snd_pcm_lib_ioctl,
	.hw_params = 	s6000_pcm_hw_params,
	.hw_free = 	s6000_pcm_hw_free,
	.trigger =	s6000_pcm_trigger,
	.prepare = 	s6000_pcm_prepare,
	.pointer = 	s6000_pcm_pointer,
};

/* Undo s6000_pcm_new(): release the IRQ and preallocated buffers. */
static void s6000_pcm_free(struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data;

	free_irq(params->irq, pcm);
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
425
426static u64 s6000_pcm_dmamask = DMA_32BIT_MASK;
427
428static int s6000_pcm_new(struct snd_card *card,
429 struct snd_soc_dai *dai, struct snd_pcm *pcm)
430{
431 struct snd_soc_pcm_runtime *runtime = pcm->private_data;
432 struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data;
433 int res;
434
435 if (!card->dev->dma_mask)
436 card->dev->dma_mask = &s6000_pcm_dmamask;
437 if (!card->dev->coherent_dma_mask)
438 card->dev->coherent_dma_mask = DMA_32BIT_MASK;
439
440 if (params->dma_in) {
441 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
442 DMA_INDEX_CHNL(params->dma_in));
443 s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in),
444 DMA_INDEX_CHNL(params->dma_in));
445 }
446
447 if (params->dma_out) {
448 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out),
449 DMA_INDEX_CHNL(params->dma_out));
450 s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out),
451 DMA_INDEX_CHNL(params->dma_out));
452 }
453
454 res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
455 s6000_soc_platform.name, pcm);
456 if (res) {
457 printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
458 return res;
459 }
460
461 res = snd_pcm_lib_preallocate_pages_for_all(pcm,
462 SNDRV_DMA_TYPE_DEV,
463 card->dev,
464 S6_PCM_PREALLOCATE_SIZE,
465 S6_PCM_PREALLOCATE_MAX);
466 if (res)
467 printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
468
469 spin_lock_init(&params->lock);
470 params->in_use = 0;
471 params->rate = -1;
472 return 0;
473}
474
/* ASoC platform (DMA) driver exported to machine drivers */
struct snd_soc_platform s6000_soc_platform = {
	.name = 	"s6000-audio",
	.pcm_ops = 	&s6000_pcm_ops,
	.pcm_new = 	s6000_pcm_new,
	.pcm_free = 	s6000_pcm_free,
};
EXPORT_SYMBOL_GPL(s6000_soc_platform);

/* module entry: register the ASoC platform */
static int __init s6000_pcm_init(void)
{
	return snd_soc_register_platform(&s6000_soc_platform);
}
module_init(s6000_pcm_init);

/* module exit: unregister the ASoC platform */
static void __exit s6000_pcm_exit(void)
{
	snd_soc_unregister_platform(&s6000_soc_platform);
}
module_exit(s6000_pcm_exit);
diff --git a/sound/soc/s6000/s6000-pcm.h b/sound/soc/s6000/s6000-pcm.h
new file mode 100644
index 000000000000..96f23f6f52bf
--- /dev/null
+++ b/sound/soc/s6000/s6000-pcm.h
@@ -0,0 +1,35 @@
1/*
2 * ALSA PCM interface for the Stretch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _S6000_PCM_H
13#define _S6000_PCM_H
14
15struct snd_soc_dai;
16struct snd_pcm_substream;
17
18struct s6000_pcm_dma_params {
19 unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai);
20 int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
21 dma_addr_t sif_in;
22 dma_addr_t sif_out;
23 u32 dma_in;
24 u32 dma_out;
25 int irq;
26 int same_rate;
27
28 spinlock_t lock;
29 int in_use;
30 int rate;
31};
32
33extern struct snd_soc_platform s6000_soc_platform;
34
35#endif
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
new file mode 100644
index 000000000000..b5f95f9781c1
--- /dev/null
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -0,0 +1,244 @@
1/*
2 * ASoC driver for Stretch s6105 IP camera platform
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/timer.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <sound/core.h>
18#include <sound/pcm.h>
19#include <sound/soc.h>
20#include <sound/soc-dapm.h>
21
22#include <variant/dmac.h>
23
24#include "../codecs/tlv320aic3x.h"
25#include "s6000-pcm.h"
26#include "s6000-i2s.h"
27
28#define S6105_CAM_CODEC_CLOCK 12288000
29
30static int s6105_hw_params(struct snd_pcm_substream *substream,
31 struct snd_pcm_hw_params *params)
32{
33 struct snd_soc_pcm_runtime *rtd = substream->private_data;
34 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
35 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
36 int ret = 0;
37
38 /* set codec DAI configuration */
39 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
40 SND_SOC_DAIFMT_CBM_CFM);
41 if (ret < 0)
42 return ret;
43
44 /* set cpu DAI configuration */
45 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
46 SND_SOC_DAIFMT_NB_NF);
47 if (ret < 0)
48 return ret;
49
50 /* set the codec system clock */
51 ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
52 SND_SOC_CLOCK_OUT);
53 if (ret < 0)
54 return ret;
55
56 return 0;
57}
58
/* machine stream operations */
static struct snd_soc_ops s6105_ops = {
	.hw_params = s6105_hw_params,
};

/* s6105 machine dapm widgets */
static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
	SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
	SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
	SND_SOC_DAPM_LINE("Audio In", NULL),
};

/* s6105 machine connections (audio_map) to the codec pins */
static const struct snd_soc_dapm_route audio_map[] = {
	/* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
	{"Audio Out Differential", NULL, "HPLOUT"},
	{"Audio Out Differential", NULL, "HPLCOM"},
	{"Audio Out Stereo", NULL, "HPLOUT"},
	{"Audio Out Stereo", NULL, "HPROUT"},

	/* Audio In connected to LINE1L, LINE1R */
	{"LINE1L", NULL, "Audio In"},
	{"LINE1R", NULL, "Audio In"},
};
82
83static int output_type_info(struct snd_kcontrol *kcontrol,
84 struct snd_ctl_elem_info *uinfo)
85{
86 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
87 uinfo->count = 1;
88 uinfo->value.enumerated.items = 2;
89 if (uinfo->value.enumerated.item) {
90 uinfo->value.enumerated.item = 1;
91 strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
92 } else {
93 strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
94 }
95 return 0;
96}
97
/* ctl get callback: current selection is cached in private_value */
static int output_type_get(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.enumerated.item[0] = kcontrol->private_value;
	return 0;
}
104
105static int output_type_put(struct snd_kcontrol *kcontrol,
106 struct snd_ctl_elem_value *ucontrol)
107{
108 struct snd_soc_codec *codec = kcontrol->private_data;
109 unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
110 char *differential = "Audio Out Differential";
111 char *stereo = "Audio Out Stereo";
112
113 if (kcontrol->private_value == val)
114 return 0;
115 kcontrol->private_value = val;
116 snd_soc_dapm_disable_pin(codec, val ? differential : stereo);
117 snd_soc_dapm_sync(codec);
118 snd_soc_dapm_enable_pin(codec, val ? stereo : differential);
119 snd_soc_dapm_sync(codec);
120
121 return 1;
122}
123
/* mixer control switching between the two output routings */
static const struct snd_kcontrol_new audio_out_mux = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Master Output Mux",
	.index = 0,
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.info = output_type_info,
	.get = output_type_get,
	.put = output_type_put,
	.private_value = 1 /* default to stereo */
};
134
/* Logic for a aic3x as connected on the s6105 ip camera ref design:
 * register board widgets/routes, mark absent and unconnected codec
 * pins, set the default output routing, and add the routing control. */
static int s6105_aic3x_init(struct snd_soc_codec *codec)
{
	/* Add s6105 specific widgets */
	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
				  ARRAY_SIZE(aic3x_dapm_widgets));

	/* Set up s6105 specific audio path audio_map */
	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));

	/* not present */
	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
	snd_soc_dapm_nc_pin(codec, "LINE2L");
	snd_soc_dapm_nc_pin(codec, "LINE2R");

	/* not connected */
	snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */
	snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */
	snd_soc_dapm_nc_pin(codec, "LLOUT");
	snd_soc_dapm_nc_pin(codec, "RLOUT");
	snd_soc_dapm_nc_pin(codec, "HPRCOM");

	/* always connected */
	snd_soc_dapm_enable_pin(codec, "Audio In");

	/* must correspond to audio_out_mux.private_value initializer */
	snd_soc_dapm_disable_pin(codec, "Audio Out Differential");
	snd_soc_dapm_sync(codec);
	snd_soc_dapm_enable_pin(codec, "Audio Out Stereo");

	snd_soc_dapm_sync(codec);

	snd_ctl_add(codec->card, snd_ctl_new1(&audio_out_mux, codec));

	return 0;
}
171
/* s6105 digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link s6105_dai = {
	.name = "TLV320AIC31",
	.stream_name = "AIC31",
	.cpu_dai = &s6000_i2s_dai,
	.codec_dai = &aic3x_dai,
	.init = s6105_aic3x_init,
	.ops = &s6105_ops,
};

/* s6105 audio machine driver */
static struct snd_soc_card snd_soc_card_s6105 = {
	.name = "Stretch IP Camera",
	.platform = &s6000_soc_platform,
	.dai_link = &s6105_dai,
	.num_links = 1,
};

/* s6105 audio private data: codec sits on I2C bus 0 at 0x18 */
static struct aic3x_setup_data s6105_aic3x_setup = {
	.i2c_bus = 0,
	.i2c_address = 0x18,
};

/* s6105 audio subsystem */
static struct snd_soc_device s6105_snd_devdata = {
	.card = &snd_soc_card_s6105,
	.codec_dev = &soc_codec_dev_aic3x,
	.codec_data = &s6105_aic3x_setup,
};

/* dual mode, one line per direction, coupled rates;
 * __initdata is safe because platform_device_add_data() copies it */
static struct s6000_snd_platform_data __initdata s6105_snd_data = {
	.wide		= 0,
	.channel_in	= 0,
	.channel_out	= 1,
	.lines_in	= 1,
	.lines_out	= 1,
	.same_rate	= 1,
};
211
212static struct platform_device *s6105_snd_device;
213
214static int __init s6105_init(void)
215{
216 int ret;
217
218 s6105_snd_device = platform_device_alloc("soc-audio", -1);
219 if (!s6105_snd_device)
220 return -ENOMEM;
221
222 platform_set_drvdata(s6105_snd_device, &s6105_snd_devdata);
223 s6105_snd_devdata.dev = &s6105_snd_device->dev;
224 platform_device_add_data(s6105_snd_device, &s6105_snd_data,
225 sizeof(s6105_snd_data));
226
227 ret = platform_device_add(s6105_snd_device);
228 if (ret)
229 platform_device_put(s6105_snd_device);
230
231 return ret;
232}
233
/* module exit: tear down the soc-audio device */
static void __exit s6105_exit(void)
{
	platform_device_unregister(s6105_snd_device);
}

module_init(s6105_init);
module_exit(s6105_exit);

MODULE_AUTHOR("Daniel Gloeckner");
MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index 56fa0872abbb..b378096cadb1 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -145,7 +145,7 @@ static int ssi_hw_params(struct snd_pcm_substream *substream,
145 recv = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1; 145 recv = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 0 : 1;
146 146
147 pr_debug("ssi_hw_params() enter\nssicr was %08lx\n", ssicr); 147 pr_debug("ssi_hw_params() enter\nssicr was %08lx\n", ssicr);
148 pr_debug("bits: %d channels: %d\n", bits, channels); 148 pr_debug("bits: %u channels: %u\n", bits, channels);
149 149
150 ssicr &= ~(CR_TRMD | CR_CHNL_MASK | CR_DWL_MASK | CR_PDTA | 150 ssicr &= ~(CR_TRMD | CR_CHNL_MASK | CR_DWL_MASK | CR_PDTA |
151 CR_SWL_MASK); 151 CR_SWL_MASK);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1cd149b9ce69..3f44150d8e30 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -113,6 +113,35 @@ static int soc_ac97_dev_register(struct snd_soc_codec *codec)
113} 113}
114#endif 114#endif
115 115
116static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream)
117{
118 struct snd_soc_pcm_runtime *rtd = substream->private_data;
119 struct snd_soc_device *socdev = rtd->socdev;
120 struct snd_soc_card *card = socdev->card;
121 struct snd_soc_dai_link *machine = rtd->dai;
122 struct snd_soc_dai *cpu_dai = machine->cpu_dai;
123 struct snd_soc_dai *codec_dai = machine->codec_dai;
124 int ret;
125
126 if (codec_dai->symmetric_rates || cpu_dai->symmetric_rates ||
127 machine->symmetric_rates) {
128 dev_dbg(card->dev, "Symmetry forces %dHz rate\n",
129 machine->rate);
130
131 ret = snd_pcm_hw_constraint_minmax(substream->runtime,
132 SNDRV_PCM_HW_PARAM_RATE,
133 machine->rate,
134 machine->rate);
135 if (ret < 0) {
136 dev_err(card->dev,
137 "Unable to apply rate symmetry constraint: %d\n", ret);
138 return ret;
139 }
140 }
141
142 return 0;
143}
144
116/* 145/*
117 * Called by ALSA when a PCM substream is opened, the runtime->hw record is 146 * Called by ALSA when a PCM substream is opened, the runtime->hw record is
118 * then initialized and any private data can be allocated. This also calls 147 * then initialized and any private data can be allocated. This also calls
@@ -221,6 +250,13 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
221 goto machine_err; 250 goto machine_err;
222 } 251 }
223 252
253 /* Symmetry only applies if we've already got an active stream. */
254 if (cpu_dai->active || codec_dai->active) {
255 ret = soc_pcm_apply_symmetry(substream);
256 if (ret != 0)
257 goto machine_err;
258 }
259
224 pr_debug("asoc: %s <-> %s info:\n", codec_dai->name, cpu_dai->name); 260 pr_debug("asoc: %s <-> %s info:\n", codec_dai->name, cpu_dai->name);
225 pr_debug("asoc: rate mask 0x%x\n", runtime->hw.rates); 261 pr_debug("asoc: rate mask 0x%x\n", runtime->hw.rates);
226 pr_debug("asoc: min ch %d max ch %d\n", runtime->hw.channels_min, 262 pr_debug("asoc: min ch %d max ch %d\n", runtime->hw.channels_min,
@@ -263,7 +299,6 @@ static void close_delayed_work(struct work_struct *work)
263{ 299{
264 struct snd_soc_card *card = container_of(work, struct snd_soc_card, 300 struct snd_soc_card *card = container_of(work, struct snd_soc_card,
265 delayed_work.work); 301 delayed_work.work);
266 struct snd_soc_device *socdev = card->socdev;
267 struct snd_soc_codec *codec = card->codec; 302 struct snd_soc_codec *codec = card->codec;
268 struct snd_soc_dai *codec_dai; 303 struct snd_soc_dai *codec_dai;
269 int i; 304 int i;
@@ -279,27 +314,10 @@ static void close_delayed_work(struct work_struct *work)
279 314
280 /* are we waiting on this codec DAI stream */ 315 /* are we waiting on this codec DAI stream */
281 if (codec_dai->pop_wait == 1) { 316 if (codec_dai->pop_wait == 1) {
282
283 /* Reduce power if no longer active */
284 if (codec->active == 0) {
285 pr_debug("pop wq D1 %s %s\n", codec->name,
286 codec_dai->playback.stream_name);
287 snd_soc_dapm_set_bias_level(socdev,
288 SND_SOC_BIAS_PREPARE);
289 }
290
291 codec_dai->pop_wait = 0; 317 codec_dai->pop_wait = 0;
292 snd_soc_dapm_stream_event(codec, 318 snd_soc_dapm_stream_event(codec,
293 codec_dai->playback.stream_name, 319 codec_dai->playback.stream_name,
294 SND_SOC_DAPM_STREAM_STOP); 320 SND_SOC_DAPM_STREAM_STOP);
295
296 /* Fall into standby if no longer active */
297 if (codec->active == 0) {
298 pr_debug("pop wq D3 %s %s\n", codec->name,
299 codec_dai->playback.stream_name);
300 snd_soc_dapm_set_bias_level(socdev,
301 SND_SOC_BIAS_STANDBY);
302 }
303 } 321 }
304 } 322 }
305 mutex_unlock(&pcm_mutex); 323 mutex_unlock(&pcm_mutex);
@@ -363,10 +381,6 @@ static int soc_codec_close(struct snd_pcm_substream *substream)
363 snd_soc_dapm_stream_event(codec, 381 snd_soc_dapm_stream_event(codec,
364 codec_dai->capture.stream_name, 382 codec_dai->capture.stream_name,
365 SND_SOC_DAPM_STREAM_STOP); 383 SND_SOC_DAPM_STREAM_STOP);
366
367 if (codec->active == 0 && codec_dai->pop_wait == 0)
368 snd_soc_dapm_set_bias_level(socdev,
369 SND_SOC_BIAS_STANDBY);
370 } 384 }
371 385
372 mutex_unlock(&pcm_mutex); 386 mutex_unlock(&pcm_mutex);
@@ -431,36 +445,16 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
431 cancel_delayed_work(&card->delayed_work); 445 cancel_delayed_work(&card->delayed_work);
432 } 446 }
433 447
434 /* do we need to power up codec */ 448 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
435 if (codec->bias_level != SND_SOC_BIAS_ON) { 449 snd_soc_dapm_stream_event(codec,
436 snd_soc_dapm_set_bias_level(socdev, 450 codec_dai->playback.stream_name,
437 SND_SOC_BIAS_PREPARE); 451 SND_SOC_DAPM_STREAM_START);
438 452 else
439 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 453 snd_soc_dapm_stream_event(codec,
440 snd_soc_dapm_stream_event(codec, 454 codec_dai->capture.stream_name,
441 codec_dai->playback.stream_name, 455 SND_SOC_DAPM_STREAM_START);
442 SND_SOC_DAPM_STREAM_START);
443 else
444 snd_soc_dapm_stream_event(codec,
445 codec_dai->capture.stream_name,
446 SND_SOC_DAPM_STREAM_START);
447
448 snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_ON);
449 snd_soc_dai_digital_mute(codec_dai, 0);
450
451 } else {
452 /* codec already powered - power on widgets */
453 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
454 snd_soc_dapm_stream_event(codec,
455 codec_dai->playback.stream_name,
456 SND_SOC_DAPM_STREAM_START);
457 else
458 snd_soc_dapm_stream_event(codec,
459 codec_dai->capture.stream_name,
460 SND_SOC_DAPM_STREAM_START);
461 456
462 snd_soc_dai_digital_mute(codec_dai, 0); 457 snd_soc_dai_digital_mute(codec_dai, 0);
463 }
464 458
465out: 459out:
466 mutex_unlock(&pcm_mutex); 460 mutex_unlock(&pcm_mutex);
@@ -521,6 +515,8 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
521 } 515 }
522 } 516 }
523 517
518 machine->rate = params_rate(params);
519
524out: 520out:
525 mutex_unlock(&pcm_mutex); 521 mutex_unlock(&pcm_mutex);
526 return ret; 522 return ret;
@@ -632,6 +628,12 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
632 struct snd_soc_codec *codec = card->codec; 628 struct snd_soc_codec *codec = card->codec;
633 int i; 629 int i;
634 630
631 /* If the initialization of this soc device failed, there is no codec
632 * associated with it. Just bail out in this case.
633 */
634 if (!codec)
635 return 0;
636
635 /* Due to the resume being scheduled into a workqueue we could 637 /* Due to the resume being scheduled into a workqueue we could
636 * suspend before that's finished - wait for it to complete. 638 * suspend before that's finished - wait for it to complete.
637 */ 639 */
@@ -1334,6 +1336,7 @@ int snd_soc_new_pcms(struct snd_soc_device *socdev, int idx, const char *xid)
1334 return ret; 1336 return ret;
1335 } 1337 }
1336 1338
1339 codec->socdev = socdev;
1337 codec->card->dev = socdev->dev; 1340 codec->card->dev = socdev->dev;
1338 codec->card->private_data = codec; 1341 codec->card->private_data = codec;
1339 strncpy(codec->card->driver, codec->name, sizeof(codec->card->driver)); 1342 strncpy(codec->card->driver, codec->name, sizeof(codec->card->driver));
@@ -1744,7 +1747,7 @@ int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
1744{ 1747{
1745 int max = kcontrol->private_value; 1748 int max = kcontrol->private_value;
1746 1749
1747 if (max == 1) 1750 if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
1748 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; 1751 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
1749 else 1752 else
1750 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1753 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
@@ -1774,7 +1777,7 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
1774 unsigned int shift = mc->shift; 1777 unsigned int shift = mc->shift;
1775 unsigned int rshift = mc->rshift; 1778 unsigned int rshift = mc->rshift;
1776 1779
1777 if (max == 1) 1780 if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
1778 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; 1781 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
1779 else 1782 else
1780 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1783 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
@@ -1881,7 +1884,7 @@ int snd_soc_info_volsw_2r(struct snd_kcontrol *kcontrol,
1881 (struct soc_mixer_control *)kcontrol->private_value; 1884 (struct soc_mixer_control *)kcontrol->private_value;
1882 int max = mc->max; 1885 int max = mc->max;
1883 1886
1884 if (max == 1) 1887 if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
1885 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; 1888 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
1886 else 1889 else
1887 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1890 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
@@ -2065,7 +2068,7 @@ EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
2065int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, 2068int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
2066 unsigned int freq, int dir) 2069 unsigned int freq, int dir)
2067{ 2070{
2068 if (dai->ops->set_sysclk) 2071 if (dai->ops && dai->ops->set_sysclk)
2069 return dai->ops->set_sysclk(dai, clk_id, freq, dir); 2072 return dai->ops->set_sysclk(dai, clk_id, freq, dir);
2070 else 2073 else
2071 return -EINVAL; 2074 return -EINVAL;
@@ -2085,7 +2088,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
2085int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai, 2088int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
2086 int div_id, int div) 2089 int div_id, int div)
2087{ 2090{
2088 if (dai->ops->set_clkdiv) 2091 if (dai->ops && dai->ops->set_clkdiv)
2089 return dai->ops->set_clkdiv(dai, div_id, div); 2092 return dai->ops->set_clkdiv(dai, div_id, div);
2090 else 2093 else
2091 return -EINVAL; 2094 return -EINVAL;
@@ -2104,7 +2107,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);
2104int snd_soc_dai_set_pll(struct snd_soc_dai *dai, 2107int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
2105 int pll_id, unsigned int freq_in, unsigned int freq_out) 2108 int pll_id, unsigned int freq_in, unsigned int freq_out)
2106{ 2109{
2107 if (dai->ops->set_pll) 2110 if (dai->ops && dai->ops->set_pll)
2108 return dai->ops->set_pll(dai, pll_id, freq_in, freq_out); 2111 return dai->ops->set_pll(dai, pll_id, freq_in, freq_out);
2109 else 2112 else
2110 return -EINVAL; 2113 return -EINVAL;
@@ -2120,7 +2123,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);
2120 */ 2123 */
2121int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) 2124int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
2122{ 2125{
2123 if (dai->ops->set_fmt) 2126 if (dai->ops && dai->ops->set_fmt)
2124 return dai->ops->set_fmt(dai, fmt); 2127 return dai->ops->set_fmt(dai, fmt);
2125 else 2128 else
2126 return -EINVAL; 2129 return -EINVAL;
@@ -2139,7 +2142,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
2139int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, 2142int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
2140 unsigned int mask, int slots) 2143 unsigned int mask, int slots)
2141{ 2144{
2142 if (dai->ops->set_sysclk) 2145 if (dai->ops && dai->ops->set_tdm_slot)
2143 return dai->ops->set_tdm_slot(dai, mask, slots); 2146 return dai->ops->set_tdm_slot(dai, mask, slots);
2144 else 2147 else
2145 return -EINVAL; 2148 return -EINVAL;
@@ -2155,7 +2158,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);
2155 */ 2158 */
2156int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate) 2159int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
2157{ 2160{
2158 if (dai->ops->set_sysclk) 2161 if (dai->ops && dai->ops->set_tristate)
2159 return dai->ops->set_tristate(dai, tristate); 2162 return dai->ops->set_tristate(dai, tristate);
2160 else 2163 else
2161 return -EINVAL; 2164 return -EINVAL;
@@ -2171,7 +2174,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
2171 */ 2174 */
2172int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute) 2175int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute)
2173{ 2176{
2174 if (dai->ops->digital_mute) 2177 if (dai->ops && dai->ops->digital_mute)
2175 return dai->ops->digital_mute(dai, mute); 2178 return dai->ops->digital_mute(dai, mute);
2176 else 2179 else
2177 return -EINVAL; 2180 return -EINVAL;
@@ -2352,6 +2355,39 @@ void snd_soc_unregister_platform(struct snd_soc_platform *platform)
2352} 2355}
2353EXPORT_SYMBOL_GPL(snd_soc_unregister_platform); 2356EXPORT_SYMBOL_GPL(snd_soc_unregister_platform);
2354 2357
2358static u64 codec_format_map[] = {
2359 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE,
2360 SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE,
2361 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE,
2362 SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE,
2363 SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE,
2364 SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE,
2365 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
2366 SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
2367 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE,
2368 SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE,
2369 SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE,
2370 SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE,
2371 SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE,
2372 SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE,
2373 SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
2374 | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE,
2375};
2376
2377/* Fix up the DAI formats for endianness: codecs don't actually see
2378 * the endianness of the data but we're using the CPU format
2379 * definitions which do need to include endianness so we ensure that
2380 * codec DAIs always have both big and little endian variants set.
2381 */
2382static void fixup_codec_formats(struct snd_soc_pcm_stream *stream)
2383{
2384 int i;
2385
2386 for (i = 0; i < ARRAY_SIZE(codec_format_map); i++)
2387 if (stream->formats & codec_format_map[i])
2388 stream->formats |= codec_format_map[i];
2389}
2390
2355/** 2391/**
2356 * snd_soc_register_codec - Register a codec with the ASoC core 2392 * snd_soc_register_codec - Register a codec with the ASoC core
2357 * 2393 *
@@ -2359,6 +2395,8 @@ EXPORT_SYMBOL_GPL(snd_soc_unregister_platform);
2359 */ 2395 */
2360int snd_soc_register_codec(struct snd_soc_codec *codec) 2396int snd_soc_register_codec(struct snd_soc_codec *codec)
2361{ 2397{
2398 int i;
2399
2362 if (!codec->name) 2400 if (!codec->name)
2363 return -EINVAL; 2401 return -EINVAL;
2364 2402
@@ -2368,6 +2406,11 @@ int snd_soc_register_codec(struct snd_soc_codec *codec)
2368 2406
2369 INIT_LIST_HEAD(&codec->list); 2407 INIT_LIST_HEAD(&codec->list);
2370 2408
2409 for (i = 0; i < codec->num_dai; i++) {
2410 fixup_codec_formats(&codec->dai[i].playback);
2411 fixup_codec_formats(&codec->dai[i].capture);
2412 }
2413
2371 mutex_lock(&client_mutex); 2414 mutex_lock(&client_mutex);
2372 list_add(&codec->list, &codec_list); 2415 list_add(&codec->list, &codec_list);
2373 snd_soc_instantiate_cards(); 2416 snd_soc_instantiate_cards();
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 735903a74675..21c69074aa17 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -12,7 +12,7 @@
12 * Features: 12 * Features:
13 * o Changes power status of internal codec blocks depending on the 13 * o Changes power status of internal codec blocks depending on the
14 * dynamic configuration of codec internal audio paths and active 14 * dynamic configuration of codec internal audio paths and active
15 * DAC's/ADC's. 15 * DACs/ADCs.
16 * o Platform power domain - can support external components i.e. amps and 16 * o Platform power domain - can support external components i.e. amps and
17 * mic/meadphone insertion events. 17 * mic/meadphone insertion events.
18 * o Automatic Mic Bias support 18 * o Automatic Mic Bias support
@@ -52,23 +52,21 @@
52 52
53/* dapm power sequences - make this per codec in the future */ 53/* dapm power sequences - make this per codec in the future */
54static int dapm_up_seq[] = { 54static int dapm_up_seq[] = {
55 snd_soc_dapm_pre, snd_soc_dapm_micbias, snd_soc_dapm_mic, 55 snd_soc_dapm_pre, snd_soc_dapm_supply, snd_soc_dapm_micbias,
56 snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_dac, 56 snd_soc_dapm_mic, snd_soc_dapm_mux, snd_soc_dapm_value_mux,
57 snd_soc_dapm_mixer, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_pga, 57 snd_soc_dapm_dac, snd_soc_dapm_mixer, snd_soc_dapm_mixer_named_ctl,
58 snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk, snd_soc_dapm_post 58 snd_soc_dapm_pga, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
59 snd_soc_dapm_post
59}; 60};
60 61
61static int dapm_down_seq[] = { 62static int dapm_down_seq[] = {
62 snd_soc_dapm_pre, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk, 63 snd_soc_dapm_pre, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
63 snd_soc_dapm_pga, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_mixer, 64 snd_soc_dapm_pga, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_mixer,
64 snd_soc_dapm_dac, snd_soc_dapm_mic, snd_soc_dapm_micbias, 65 snd_soc_dapm_dac, snd_soc_dapm_mic, snd_soc_dapm_micbias,
65 snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_post 66 snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_supply,
67 snd_soc_dapm_post
66}; 68};
67 69
68static int dapm_status = 1;
69module_param(dapm_status, int, 0);
70MODULE_PARM_DESC(dapm_status, "enable DPM sysfs entries");
71
72static void pop_wait(u32 pop_time) 70static void pop_wait(u32 pop_time)
73{ 71{
74 if (pop_time) 72 if (pop_time)
@@ -96,6 +94,48 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
96 return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL); 94 return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
97} 95}
98 96
97/**
98 * snd_soc_dapm_set_bias_level - set the bias level for the system
99 * @socdev: audio device
100 * @level: level to configure
101 *
102 * Configure the bias (power) levels for the SoC audio device.
103 *
104 * Returns 0 for success else error.
105 */
106static int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
107 enum snd_soc_bias_level level)
108{
109 struct snd_soc_card *card = socdev->card;
110 struct snd_soc_codec *codec = socdev->card->codec;
111 int ret = 0;
112
113 switch (level) {
114 case SND_SOC_BIAS_ON:
115 dev_dbg(socdev->dev, "Setting full bias\n");
116 break;
117 case SND_SOC_BIAS_PREPARE:
118 dev_dbg(socdev->dev, "Setting bias prepare\n");
119 break;
120 case SND_SOC_BIAS_STANDBY:
121 dev_dbg(socdev->dev, "Setting standby bias\n");
122 break;
123 case SND_SOC_BIAS_OFF:
124 dev_dbg(socdev->dev, "Setting bias off\n");
125 break;
126 default:
127 dev_err(socdev->dev, "Setting invalid bias %d\n", level);
128 return -EINVAL;
129 }
130
131 if (card->set_bias_level)
132 ret = card->set_bias_level(card, level);
133 if (ret == 0 && codec->set_bias_level)
134 ret = codec->set_bias_level(codec, level);
135
136 return ret;
137}
138
99/* set up initial codec paths */ 139/* set up initial codec paths */
100static void dapm_set_path_status(struct snd_soc_dapm_widget *w, 140static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
101 struct snd_soc_dapm_path *p, int i) 141 struct snd_soc_dapm_path *p, int i)
@@ -165,6 +205,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
165 case snd_soc_dapm_dac: 205 case snd_soc_dapm_dac:
166 case snd_soc_dapm_micbias: 206 case snd_soc_dapm_micbias:
167 case snd_soc_dapm_vmid: 207 case snd_soc_dapm_vmid:
208 case snd_soc_dapm_supply:
168 p->connect = 1; 209 p->connect = 1;
169 break; 210 break;
170 /* does effect routing - dynamically connected */ 211 /* does effect routing - dynamically connected */
@@ -179,7 +220,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
179 } 220 }
180} 221}
181 222
182/* connect mux widget to it's interconnecting audio paths */ 223/* connect mux widget to its interconnecting audio paths */
183static int dapm_connect_mux(struct snd_soc_codec *codec, 224static int dapm_connect_mux(struct snd_soc_codec *codec,
184 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest, 225 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
185 struct snd_soc_dapm_path *path, const char *control_name, 226 struct snd_soc_dapm_path *path, const char *control_name,
@@ -202,7 +243,7 @@ static int dapm_connect_mux(struct snd_soc_codec *codec,
202 return -ENODEV; 243 return -ENODEV;
203} 244}
204 245
205/* connect mixer widget to it's interconnecting audio paths */ 246/* connect mixer widget to its interconnecting audio paths */
206static int dapm_connect_mixer(struct snd_soc_codec *codec, 247static int dapm_connect_mixer(struct snd_soc_codec *codec,
207 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest, 248 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
208 struct snd_soc_dapm_path *path, const char *control_name) 249 struct snd_soc_dapm_path *path, const char *control_name)
@@ -357,8 +398,9 @@ static int dapm_new_mixer(struct snd_soc_codec *codec,
357 path->long_name); 398 path->long_name);
358 ret = snd_ctl_add(codec->card, path->kcontrol); 399 ret = snd_ctl_add(codec->card, path->kcontrol);
359 if (ret < 0) { 400 if (ret < 0) {
360 printk(KERN_ERR "asoc: failed to add dapm kcontrol %s\n", 401 printk(KERN_ERR "asoc: failed to add dapm kcontrol %s: %d\n",
361 path->long_name); 402 path->long_name,
403 ret);
362 kfree(path->long_name); 404 kfree(path->long_name);
363 path->long_name = NULL; 405 path->long_name = NULL;
364 return ret; 406 return ret;
@@ -434,6 +476,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
434 struct snd_soc_dapm_path *path; 476 struct snd_soc_dapm_path *path;
435 int con = 0; 477 int con = 0;
436 478
479 if (widget->id == snd_soc_dapm_supply)
480 return 0;
481
437 if (widget->id == snd_soc_dapm_adc && widget->active) 482 if (widget->id == snd_soc_dapm_adc && widget->active)
438 return 1; 483 return 1;
439 484
@@ -470,6 +515,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
470 struct snd_soc_dapm_path *path; 515 struct snd_soc_dapm_path *path;
471 int con = 0; 516 int con = 0;
472 517
518 if (widget->id == snd_soc_dapm_supply)
519 return 0;
520
473 /* active stream ? */ 521 /* active stream ? */
474 if (widget->id == snd_soc_dapm_dac && widget->active) 522 if (widget->id == snd_soc_dapm_dac && widget->active)
475 return 1; 523 return 1;
@@ -521,84 +569,12 @@ int dapm_reg_event(struct snd_soc_dapm_widget *w,
521} 569}
522EXPORT_SYMBOL_GPL(dapm_reg_event); 570EXPORT_SYMBOL_GPL(dapm_reg_event);
523 571
524/* 572/* Standard power change method, used to apply power changes to most
525 * Scan a single DAPM widget for a complete audio path and update the 573 * widgets.
526 * power status appropriately.
527 */ 574 */
528static int dapm_power_widget(struct snd_soc_codec *codec, int event, 575static int dapm_generic_apply_power(struct snd_soc_dapm_widget *w)
529 struct snd_soc_dapm_widget *w)
530{ 576{
531 int in, out, power_change, power, ret; 577 int ret;
532
533 /* vmid - no action */
534 if (w->id == snd_soc_dapm_vmid)
535 return 0;
536
537 /* active ADC */
538 if (w->id == snd_soc_dapm_adc && w->active) {
539 in = is_connected_input_ep(w);
540 dapm_clear_walk(w->codec);
541 w->power = (in != 0) ? 1 : 0;
542 dapm_update_bits(w);
543 return 0;
544 }
545
546 /* active DAC */
547 if (w->id == snd_soc_dapm_dac && w->active) {
548 out = is_connected_output_ep(w);
549 dapm_clear_walk(w->codec);
550 w->power = (out != 0) ? 1 : 0;
551 dapm_update_bits(w);
552 return 0;
553 }
554
555 /* pre and post event widgets */
556 if (w->id == snd_soc_dapm_pre) {
557 if (!w->event)
558 return 0;
559
560 if (event == SND_SOC_DAPM_STREAM_START) {
561 ret = w->event(w,
562 NULL, SND_SOC_DAPM_PRE_PMU);
563 if (ret < 0)
564 return ret;
565 } else if (event == SND_SOC_DAPM_STREAM_STOP) {
566 ret = w->event(w,
567 NULL, SND_SOC_DAPM_PRE_PMD);
568 if (ret < 0)
569 return ret;
570 }
571 return 0;
572 }
573 if (w->id == snd_soc_dapm_post) {
574 if (!w->event)
575 return 0;
576
577 if (event == SND_SOC_DAPM_STREAM_START) {
578 ret = w->event(w,
579 NULL, SND_SOC_DAPM_POST_PMU);
580 if (ret < 0)
581 return ret;
582 } else if (event == SND_SOC_DAPM_STREAM_STOP) {
583 ret = w->event(w,
584 NULL, SND_SOC_DAPM_POST_PMD);
585 if (ret < 0)
586 return ret;
587 }
588 return 0;
589 }
590
591 /* all other widgets */
592 in = is_connected_input_ep(w);
593 dapm_clear_walk(w->codec);
594 out = is_connected_output_ep(w);
595 dapm_clear_walk(w->codec);
596 power = (out != 0 && in != 0) ? 1 : 0;
597 power_change = (w->power == power) ? 0 : 1;
598 w->power = power;
599
600 if (!power_change)
601 return 0;
602 578
603 /* call any power change event handlers */ 579 /* call any power change event handlers */
604 if (w->event) 580 if (w->event)
@@ -607,7 +583,7 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
607 w->name, w->event_flags); 583 w->name, w->event_flags);
608 584
609 /* power up pre event */ 585 /* power up pre event */
610 if (power && w->event && 586 if (w->power && w->event &&
611 (w->event_flags & SND_SOC_DAPM_PRE_PMU)) { 587 (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
612 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU); 588 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
613 if (ret < 0) 589 if (ret < 0)
@@ -615,7 +591,7 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
615 } 591 }
616 592
617 /* power down pre event */ 593 /* power down pre event */
618 if (!power && w->event && 594 if (!w->power && w->event &&
619 (w->event_flags & SND_SOC_DAPM_PRE_PMD)) { 595 (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
620 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD); 596 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
621 if (ret < 0) 597 if (ret < 0)
@@ -623,17 +599,17 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
623 } 599 }
624 600
625 /* Lower PGA volume to reduce pops */ 601 /* Lower PGA volume to reduce pops */
626 if (w->id == snd_soc_dapm_pga && !power) 602 if (w->id == snd_soc_dapm_pga && !w->power)
627 dapm_set_pga(w, power); 603 dapm_set_pga(w, w->power);
628 604
629 dapm_update_bits(w); 605 dapm_update_bits(w);
630 606
631 /* Raise PGA volume to reduce pops */ 607 /* Raise PGA volume to reduce pops */
632 if (w->id == snd_soc_dapm_pga && power) 608 if (w->id == snd_soc_dapm_pga && w->power)
633 dapm_set_pga(w, power); 609 dapm_set_pga(w, w->power);
634 610
635 /* power up post event */ 611 /* power up post event */
636 if (power && w->event && 612 if (w->power && w->event &&
637 (w->event_flags & SND_SOC_DAPM_POST_PMU)) { 613 (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
638 ret = w->event(w, 614 ret = w->event(w,
639 NULL, SND_SOC_DAPM_POST_PMU); 615 NULL, SND_SOC_DAPM_POST_PMU);
@@ -642,7 +618,7 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
642 } 618 }
643 619
644 /* power down post event */ 620 /* power down post event */
645 if (!power && w->event && 621 if (!w->power && w->event &&
646 (w->event_flags & SND_SOC_DAPM_POST_PMD)) { 622 (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
647 ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD); 623 ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
648 if (ret < 0) 624 if (ret < 0)
@@ -652,6 +628,116 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
652 return 0; 628 return 0;
653} 629}
654 630
631/* Generic check to see if a widget should be powered.
632 */
633static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
634{
635 int in, out;
636
637 in = is_connected_input_ep(w);
638 dapm_clear_walk(w->codec);
639 out = is_connected_output_ep(w);
640 dapm_clear_walk(w->codec);
641 return out != 0 && in != 0;
642}
643
644/* Check to see if an ADC has power */
645static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
646{
647 int in;
648
649 if (w->active) {
650 in = is_connected_input_ep(w);
651 dapm_clear_walk(w->codec);
652 return in != 0;
653 } else {
654 return dapm_generic_check_power(w);
655 }
656}
657
658/* Check to see if a DAC has power */
659static int dapm_dac_check_power(struct snd_soc_dapm_widget *w)
660{
661 int out;
662
663 if (w->active) {
664 out = is_connected_output_ep(w);
665 dapm_clear_walk(w->codec);
666 return out != 0;
667 } else {
668 return dapm_generic_check_power(w);
669 }
670}
671
672/* Check to see if a power supply is needed */
673static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
674{
675 struct snd_soc_dapm_path *path;
676 int power = 0;
677
678 /* Check if one of our outputs is connected */
679 list_for_each_entry(path, &w->sinks, list_source) {
680 if (path->sink && path->sink->power_check &&
681 path->sink->power_check(path->sink)) {
682 power = 1;
683 break;
684 }
685 }
686
687 dapm_clear_walk(w->codec);
688
689 return power;
690}
691
692/*
693 * Scan a single DAPM widget for a complete audio path and update the
694 * power status appropriately.
695 */
696static int dapm_power_widget(struct snd_soc_codec *codec, int event,
697 struct snd_soc_dapm_widget *w)
698{
699 int ret;
700
701 switch (w->id) {
702 case snd_soc_dapm_pre:
703 if (!w->event)
704 return 0;
705
706 if (event == SND_SOC_DAPM_STREAM_START) {
707 ret = w->event(w,
708 NULL, SND_SOC_DAPM_PRE_PMU);
709 if (ret < 0)
710 return ret;
711 } else if (event == SND_SOC_DAPM_STREAM_STOP) {
712 ret = w->event(w,
713 NULL, SND_SOC_DAPM_PRE_PMD);
714 if (ret < 0)
715 return ret;
716 }
717 return 0;
718
719 case snd_soc_dapm_post:
720 if (!w->event)
721 return 0;
722
723 if (event == SND_SOC_DAPM_STREAM_START) {
724 ret = w->event(w,
725 NULL, SND_SOC_DAPM_POST_PMU);
726 if (ret < 0)
727 return ret;
728 } else if (event == SND_SOC_DAPM_STREAM_STOP) {
729 ret = w->event(w,
730 NULL, SND_SOC_DAPM_POST_PMD);
731 if (ret < 0)
732 return ret;
733 }
734 return 0;
735
736 default:
737 return dapm_generic_apply_power(w);
738 }
739}
740
655/* 741/*
656 * Scan each dapm widget for complete audio path. 742 * Scan each dapm widget for complete audio path.
657 * A complete path is a route that has valid endpoints i.e.:- 743 * A complete path is a route that has valid endpoints i.e.:-
@@ -663,31 +749,102 @@ static int dapm_power_widget(struct snd_soc_codec *codec, int event,
663 */ 749 */
664static int dapm_power_widgets(struct snd_soc_codec *codec, int event) 750static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
665{ 751{
752 struct snd_soc_device *socdev = codec->socdev;
666 struct snd_soc_dapm_widget *w; 753 struct snd_soc_dapm_widget *w;
667 int i, c = 1, *seq = NULL, ret = 0; 754 int ret = 0;
668 755 int i, power;
669 /* do we have a sequenced stream event */ 756 int sys_power = 0;
670 if (event == SND_SOC_DAPM_STREAM_START) { 757
671 c = ARRAY_SIZE(dapm_up_seq); 758 INIT_LIST_HEAD(&codec->up_list);
672 seq = dapm_up_seq; 759 INIT_LIST_HEAD(&codec->down_list);
673 } else if (event == SND_SOC_DAPM_STREAM_STOP) { 760
674 c = ARRAY_SIZE(dapm_down_seq); 761 /* Check which widgets we need to power and store them in
675 seq = dapm_down_seq; 762 * lists indicating if they should be powered up or down.
763 */
764 list_for_each_entry(w, &codec->dapm_widgets, list) {
765 switch (w->id) {
766 case snd_soc_dapm_pre:
767 list_add_tail(&codec->down_list, &w->power_list);
768 break;
769 case snd_soc_dapm_post:
770 list_add_tail(&codec->up_list, &w->power_list);
771 break;
772
773 default:
774 if (!w->power_check)
775 continue;
776
777 power = w->power_check(w);
778 if (power)
779 sys_power = 1;
780
781 if (w->power == power)
782 continue;
783
784 if (power)
785 list_add_tail(&w->power_list, &codec->up_list);
786 else
787 list_add_tail(&w->power_list,
788 &codec->down_list);
789
790 w->power = power;
791 break;
792 }
676 } 793 }
677 794
678 for (i = 0; i < c; i++) { 795 /* If we're changing to all on or all off then prepare */
679 list_for_each_entry(w, &codec->dapm_widgets, list) { 796 if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
797 (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
798 ret = snd_soc_dapm_set_bias_level(socdev,
799 SND_SOC_BIAS_PREPARE);
800 if (ret != 0)
801 pr_err("Failed to prepare bias: %d\n", ret);
802 }
680 803
804 /* Power down widgets first; try to avoid amplifying pops. */
805 for (i = 0; i < ARRAY_SIZE(dapm_down_seq); i++) {
806 list_for_each_entry(w, &codec->down_list, power_list) {
681 /* is widget in stream order */ 807 /* is widget in stream order */
682 if (seq && seq[i] && w->id != seq[i]) 808 if (w->id != dapm_down_seq[i])
683 continue; 809 continue;
684 810
685 ret = dapm_power_widget(codec, event, w); 811 ret = dapm_power_widget(codec, event, w);
686 if (ret != 0) 812 if (ret != 0)
687 return ret; 813 pr_err("Failed to power down %s: %d\n",
814 w->name, ret);
688 } 815 }
689 } 816 }
690 817
818 /* Now power up. */
819 for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) {
820 list_for_each_entry(w, &codec->up_list, power_list) {
821 /* is widget in stream order */
822 if (w->id != dapm_up_seq[i])
823 continue;
824
825 ret = dapm_power_widget(codec, event, w);
826 if (ret != 0)
827 pr_err("Failed to power up %s: %d\n",
828 w->name, ret);
829 }
830 }
831
832 /* If we just powered the last thing off drop to standby bias */
833 if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
834 ret = snd_soc_dapm_set_bias_level(socdev,
835 SND_SOC_BIAS_STANDBY);
836 if (ret != 0)
837 pr_err("Failed to apply standby bias: %d\n", ret);
838 }
839
840 /* If we just powered up then move to active bias */
841 if (codec->bias_level == SND_SOC_BIAS_PREPARE && sys_power) {
842 ret = snd_soc_dapm_set_bias_level(socdev,
843 SND_SOC_BIAS_ON);
844 if (ret != 0)
845 pr_err("Failed to apply active bias: %d\n", ret);
846 }
847
691 return 0; 848 return 0;
692} 849}
693 850
@@ -723,6 +880,7 @@ static void dbg_dump_dapm(struct snd_soc_codec* codec, const char *action)
723 case snd_soc_dapm_pga: 880 case snd_soc_dapm_pga:
724 case snd_soc_dapm_mixer: 881 case snd_soc_dapm_mixer:
725 case snd_soc_dapm_mixer_named_ctl: 882 case snd_soc_dapm_mixer_named_ctl:
883 case snd_soc_dapm_supply:
726 if (w->name) { 884 if (w->name) {
727 in = is_connected_input_ep(w); 885 in = is_connected_input_ep(w);
728 dapm_clear_walk(w->codec); 886 dapm_clear_walk(w->codec);
@@ -851,6 +1009,7 @@ static ssize_t dapm_widget_show(struct device *dev,
851 case snd_soc_dapm_pga: 1009 case snd_soc_dapm_pga:
852 case snd_soc_dapm_mixer: 1010 case snd_soc_dapm_mixer:
853 case snd_soc_dapm_mixer_named_ctl: 1011 case snd_soc_dapm_mixer_named_ctl:
1012 case snd_soc_dapm_supply:
854 if (w->name) 1013 if (w->name)
855 count += sprintf(buf + count, "%s: %s\n", 1014 count += sprintf(buf + count, "%s: %s\n",
856 w->name, w->power ? "On":"Off"); 1015 w->name, w->power ? "On":"Off");
@@ -883,16 +1042,12 @@ static DEVICE_ATTR(dapm_widget, 0444, dapm_widget_show, NULL);
883 1042
884int snd_soc_dapm_sys_add(struct device *dev) 1043int snd_soc_dapm_sys_add(struct device *dev)
885{ 1044{
886 if (!dapm_status)
887 return 0;
888 return device_create_file(dev, &dev_attr_dapm_widget); 1045 return device_create_file(dev, &dev_attr_dapm_widget);
889} 1046}
890 1047
891static void snd_soc_dapm_sys_remove(struct device *dev) 1048static void snd_soc_dapm_sys_remove(struct device *dev)
892{ 1049{
893 if (dapm_status) { 1050 device_remove_file(dev, &dev_attr_dapm_widget);
894 device_remove_file(dev, &dev_attr_dapm_widget);
895 }
896} 1051}
897 1052
898/* free all dapm widgets and resources */ 1053/* free all dapm widgets and resources */
@@ -1015,6 +1170,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
1015 case snd_soc_dapm_vmid: 1170 case snd_soc_dapm_vmid:
1016 case snd_soc_dapm_pre: 1171 case snd_soc_dapm_pre:
1017 case snd_soc_dapm_post: 1172 case snd_soc_dapm_post:
1173 case snd_soc_dapm_supply:
1018 list_add(&path->list, &codec->dapm_paths); 1174 list_add(&path->list, &codec->dapm_paths);
1019 list_add(&path->list_sink, &wsink->sources); 1175 list_add(&path->list_sink, &wsink->sources);
1020 list_add(&path->list_source, &wsource->sinks); 1176 list_add(&path->list_source, &wsource->sinks);
@@ -1108,15 +1264,22 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
1108 case snd_soc_dapm_switch: 1264 case snd_soc_dapm_switch:
1109 case snd_soc_dapm_mixer: 1265 case snd_soc_dapm_mixer:
1110 case snd_soc_dapm_mixer_named_ctl: 1266 case snd_soc_dapm_mixer_named_ctl:
1267 w->power_check = dapm_generic_check_power;
1111 dapm_new_mixer(codec, w); 1268 dapm_new_mixer(codec, w);
1112 break; 1269 break;
1113 case snd_soc_dapm_mux: 1270 case snd_soc_dapm_mux:
1114 case snd_soc_dapm_value_mux: 1271 case snd_soc_dapm_value_mux:
1272 w->power_check = dapm_generic_check_power;
1115 dapm_new_mux(codec, w); 1273 dapm_new_mux(codec, w);
1116 break; 1274 break;
1117 case snd_soc_dapm_adc: 1275 case snd_soc_dapm_adc:
1276 w->power_check = dapm_adc_check_power;
1277 break;
1118 case snd_soc_dapm_dac: 1278 case snd_soc_dapm_dac:
1279 w->power_check = dapm_dac_check_power;
1280 break;
1119 case snd_soc_dapm_pga: 1281 case snd_soc_dapm_pga:
1282 w->power_check = dapm_generic_check_power;
1120 dapm_new_pga(codec, w); 1283 dapm_new_pga(codec, w);
1121 break; 1284 break;
1122 case snd_soc_dapm_input: 1285 case snd_soc_dapm_input:
@@ -1126,6 +1289,10 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
1126 case snd_soc_dapm_hp: 1289 case snd_soc_dapm_hp:
1127 case snd_soc_dapm_mic: 1290 case snd_soc_dapm_mic:
1128 case snd_soc_dapm_line: 1291 case snd_soc_dapm_line:
1292 w->power_check = dapm_generic_check_power;
1293 break;
1294 case snd_soc_dapm_supply:
1295 w->power_check = dapm_supply_check_power;
1129 case snd_soc_dapm_vmid: 1296 case snd_soc_dapm_vmid:
1130 case snd_soc_dapm_pre: 1297 case snd_soc_dapm_pre:
1131 case snd_soc_dapm_post: 1298 case snd_soc_dapm_post:
@@ -1626,35 +1793,11 @@ int snd_soc_dapm_stream_event(struct snd_soc_codec *codec,
1626EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_event); 1793EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_event);
1627 1794
1628/** 1795/**
1629 * snd_soc_dapm_set_bias_level - set the bias level for the system
1630 * @socdev: audio device
1631 * @level: level to configure
1632 *
1633 * Configure the bias (power) levels for the SoC audio device.
1634 *
1635 * Returns 0 for success else error.
1636 */
1637int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
1638 enum snd_soc_bias_level level)
1639{
1640 struct snd_soc_card *card = socdev->card;
1641 struct snd_soc_codec *codec = socdev->card->codec;
1642 int ret = 0;
1643
1644 if (card->set_bias_level)
1645 ret = card->set_bias_level(card, level);
1646 if (ret == 0 && codec->set_bias_level)
1647 ret = codec->set_bias_level(codec, level);
1648
1649 return ret;
1650}
1651
1652/**
1653 * snd_soc_dapm_enable_pin - enable pin. 1796 * snd_soc_dapm_enable_pin - enable pin.
1654 * @codec: SoC codec 1797 * @codec: SoC codec
1655 * @pin: pin name 1798 * @pin: pin name
1656 * 1799 *
1657 * Enables input/output pin and it's parents or children widgets iff there is 1800 * Enables input/output pin and its parents or children widgets iff there is
1658 * a valid audio route and active audio stream. 1801 * a valid audio route and active audio stream.
1659 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 1802 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
1660 * do any widget power switching. 1803 * do any widget power switching.
@@ -1670,7 +1813,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
1670 * @codec: SoC codec 1813 * @codec: SoC codec
1671 * @pin: pin name 1814 * @pin: pin name
1672 * 1815 *
1673 * Disables input/output pin and it's parents or children widgets. 1816 * Disables input/output pin and its parents or children widgets.
1674 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 1817 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
1675 * do any widget power switching. 1818 * do any widget power switching.
1676 */ 1819 */
diff --git a/sound/soc/txx9/Kconfig b/sound/soc/txx9/Kconfig
new file mode 100644
index 000000000000..ebc9327eae71
--- /dev/null
+++ b/sound/soc/txx9/Kconfig
@@ -0,0 +1,29 @@
1##
2## TXx9 ACLC
3##
4config SND_SOC_TXX9ACLC
5 tristate "SoC Audio for TXx9"
6 depends on HAS_TXX9_ACLC && TXX9_DMAC
7 help
8 This option enables support for the AC Link Controllers in TXx9 SoC.
9
10config HAS_TXX9_ACLC
11 bool
12
13config SND_SOC_TXX9ACLC_AC97
14 tristate
15 select AC97_BUS
16 select SND_AC97_CODEC
17 select SND_SOC_AC97_BUS
18
19
20##
21## Boards
22##
23config SND_SOC_TXX9ACLC_GENERIC
24 tristate "Generic TXx9 ACLC sound machine"
25 depends on SND_SOC_TXX9ACLC
26 select SND_SOC_TXX9ACLC_AC97
27 select SND_SOC_AC97_CODEC
28 help
29 This is a generic AC97 sound machine for use in TXx9 based systems.
diff --git a/sound/soc/txx9/Makefile b/sound/soc/txx9/Makefile
new file mode 100644
index 000000000000..551f16c0c4f9
--- /dev/null
+++ b/sound/soc/txx9/Makefile
@@ -0,0 +1,11 @@
1# Platform
2snd-soc-txx9aclc-objs := txx9aclc.o
3snd-soc-txx9aclc-ac97-objs := txx9aclc-ac97.o
4
5obj-$(CONFIG_SND_SOC_TXX9ACLC) += snd-soc-txx9aclc.o
6obj-$(CONFIG_SND_SOC_TXX9ACLC_AC97) += snd-soc-txx9aclc-ac97.o
7
8# Machine
9snd-soc-txx9aclc-generic-objs := txx9aclc-generic.o
10
11obj-$(CONFIG_SND_SOC_TXX9ACLC_GENERIC) += snd-soc-txx9aclc-generic.o
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
new file mode 100644
index 000000000000..0f83bdb9b16f
--- /dev/null
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -0,0 +1,255 @@
1/*
2 * TXx9 ACLC AC97 driver
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * Based on RBTX49xx patch from CELF patch archive.
7 * (C) Copyright TOSHIBA CORPORATION 2004-2006
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/delay.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <sound/core.h>
20#include <sound/pcm.h>
21#include <sound/soc.h>
22#include "txx9aclc.h"
23
24#define AC97_DIR \
25 (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
26
27#define AC97_RATES \
28 SNDRV_PCM_RATE_8000_48000
29
30#ifdef __BIG_ENDIAN
31#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_BE
32#else
33#define AC97_FMTS SNDRV_PCM_FMTBIT_S16_LE
34#endif
35
36static DECLARE_WAIT_QUEUE_HEAD(ac97_waitq);
37
38/* REVISIT: How to find txx9aclc_soc_device from snd_ac97? */
39static struct txx9aclc_soc_device *txx9aclc_soc_dev;
40
41static int txx9aclc_regready(struct txx9aclc_soc_device *dev)
42{
43 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
44
45 return __raw_readl(drvdata->base + ACINTSTS) & ACINT_REGACCRDY;
46}
47
48/* AC97 controller reads codec register */
49static unsigned short txx9aclc_ac97_read(struct snd_ac97 *ac97,
50 unsigned short reg)
51{
52 struct txx9aclc_soc_device *dev = txx9aclc_soc_dev;
53 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
54 void __iomem *base = drvdata->base;
55 u32 dat;
56
57 if (!(__raw_readl(base + ACINTSTS) & ACINT_CODECRDY(ac97->num)))
58 return 0xffff;
59 reg |= ac97->num << 7;
60 dat = (reg << ACREGACC_REG_SHIFT) | ACREGACC_READ;
61 __raw_writel(dat, base + ACREGACC);
62 __raw_writel(ACINT_REGACCRDY, base + ACINTEN);
63 if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(dev), HZ)) {
64 __raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
65 dev_err(dev->soc_dev.dev, "ac97 read timeout (reg %#x)\n", reg);
66 dat = 0xffff;
67 goto done;
68 }
69 dat = __raw_readl(base + ACREGACC);
70 if (((dat >> ACREGACC_REG_SHIFT) & 0xff) != reg) {
71 dev_err(dev->soc_dev.dev, "reg mismatch %x with %x\n",
72 dat, reg);
73 dat = 0xffff;
74 goto done;
75 }
76 dat = (dat >> ACREGACC_DAT_SHIFT) & 0xffff;
77done:
78 __raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
79 return dat;
80}
81
82/* AC97 controller writes to codec register */
83static void txx9aclc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
84 unsigned short val)
85{
86 struct txx9aclc_soc_device *dev = txx9aclc_soc_dev;
87 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
88 void __iomem *base = drvdata->base;
89
90 __raw_writel(((reg | (ac97->num << 7)) << ACREGACC_REG_SHIFT) |
91 (val << ACREGACC_DAT_SHIFT),
92 base + ACREGACC);
93 __raw_writel(ACINT_REGACCRDY, base + ACINTEN);
94 if (!wait_event_timeout(ac97_waitq, txx9aclc_regready(dev), HZ)) {
95 dev_err(dev->soc_dev.dev,
96 "ac97 write timeout (reg %#x)\n", reg);
97 }
98 __raw_writel(ACINT_REGACCRDY, base + ACINTDIS);
99}
100
101static void txx9aclc_ac97_cold_reset(struct snd_ac97 *ac97)
102{
103 struct txx9aclc_soc_device *dev = txx9aclc_soc_dev;
104 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
105 void __iomem *base = drvdata->base;
106 u32 ready = ACINT_CODECRDY(ac97->num) | ACINT_REGACCRDY;
107
108 __raw_writel(ACCTL_ENLINK, base + ACCTLDIS);
109 mmiowb();
110 udelay(1);
111 __raw_writel(ACCTL_ENLINK, base + ACCTLEN);
112 /* wait for primary codec ready status */
113 __raw_writel(ready, base + ACINTEN);
114 if (!wait_event_timeout(ac97_waitq,
115 (__raw_readl(base + ACINTSTS) & ready) == ready,
116 HZ)) {
117 dev_err(&ac97->dev, "primary codec is not ready "
118 "(status %#x)\n",
119 __raw_readl(base + ACINTSTS));
120 }
121 __raw_writel(ACINT_REGACCRDY, base + ACINTSTS);
122 __raw_writel(ready, base + ACINTDIS);
123}
124
125/* AC97 controller operations */
126struct snd_ac97_bus_ops soc_ac97_ops = {
127 .read = txx9aclc_ac97_read,
128 .write = txx9aclc_ac97_write,
129 .reset = txx9aclc_ac97_cold_reset,
130};
131EXPORT_SYMBOL_GPL(soc_ac97_ops);
132
133static irqreturn_t txx9aclc_ac97_irq(int irq, void *dev_id)
134{
135 struct txx9aclc_plat_drvdata *drvdata = dev_id;
136 void __iomem *base = drvdata->base;
137
138 __raw_writel(__raw_readl(base + ACINTMSTS), base + ACINTDIS);
139 wake_up(&ac97_waitq);
140 return IRQ_HANDLED;
141}
142
143static int txx9aclc_ac97_probe(struct platform_device *pdev,
144 struct snd_soc_dai *dai)
145{
146 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
147 struct txx9aclc_soc_device *dev =
148 container_of(socdev, struct txx9aclc_soc_device, soc_dev);
149
150 dev->aclc_pdev = to_platform_device(dai->dev);
151 txx9aclc_soc_dev = dev;
152 return 0;
153}
154
155static void txx9aclc_ac97_remove(struct platform_device *pdev,
156 struct snd_soc_dai *dai)
157{
158 struct platform_device *aclc_pdev = to_platform_device(dai->dev);
159 struct txx9aclc_plat_drvdata *drvdata = platform_get_drvdata(aclc_pdev);
160
161 /* disable AC-link */
162 __raw_writel(ACCTL_ENLINK, drvdata->base + ACCTLDIS);
163 txx9aclc_soc_dev = NULL;
164}
165
166struct snd_soc_dai txx9aclc_ac97_dai = {
167 .name = "txx9aclc_ac97",
168 .ac97_control = 1,
169 .probe = txx9aclc_ac97_probe,
170 .remove = txx9aclc_ac97_remove,
171 .playback = {
172 .rates = AC97_RATES,
173 .formats = AC97_FMTS,
174 .channels_min = 2,
175 .channels_max = 2,
176 },
177 .capture = {
178 .rates = AC97_RATES,
179 .formats = AC97_FMTS,
180 .channels_min = 2,
181 .channels_max = 2,
182 },
183};
184EXPORT_SYMBOL_GPL(txx9aclc_ac97_dai);
185
186static int __devinit txx9aclc_ac97_dev_probe(struct platform_device *pdev)
187{
188 struct txx9aclc_plat_drvdata *drvdata;
189 struct resource *r;
190 int err;
191 int irq;
192
193 irq = platform_get_irq(pdev, 0);
194 if (irq < 0)
195 return irq;
196 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
197 if (!r)
198 return -EBUSY;
199
200 if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
201 dev_name(&pdev->dev)))
202 return -EBUSY;
203
204 drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
205 if (!drvdata)
206 return -ENOMEM;
207 platform_set_drvdata(pdev, drvdata);
208 drvdata->physbase = r->start;
209 if (sizeof(drvdata->physbase) > sizeof(r->start) &&
210 r->start >= TXX9_DIRECTMAP_BASE &&
211 r->start < TXX9_DIRECTMAP_BASE + 0x400000)
212 drvdata->physbase |= 0xf00000000ull;
213 drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
214 if (!drvdata->base)
215 return -EBUSY;
216 err = devm_request_irq(&pdev->dev, irq, txx9aclc_ac97_irq,
217 IRQF_DISABLED, dev_name(&pdev->dev), drvdata);
218 if (err < 0)
219 return err;
220
221 txx9aclc_ac97_dai.dev = &pdev->dev;
222 return snd_soc_register_dai(&txx9aclc_ac97_dai);
223}
224
225static int __devexit txx9aclc_ac97_dev_remove(struct platform_device *pdev)
226{
227 snd_soc_unregister_dai(&txx9aclc_ac97_dai);
228 return 0;
229}
230
231static struct platform_driver txx9aclc_ac97_driver = {
232 .probe = txx9aclc_ac97_dev_probe,
233 .remove = __devexit_p(txx9aclc_ac97_dev_remove),
234 .driver = {
235 .name = "txx9aclc-ac97",
236 .owner = THIS_MODULE,
237 },
238};
239
240static int __init txx9aclc_ac97_init(void)
241{
242 return platform_driver_register(&txx9aclc_ac97_driver);
243}
244
245static void __exit txx9aclc_ac97_exit(void)
246{
247 platform_driver_unregister(&txx9aclc_ac97_driver);
248}
249
250module_init(txx9aclc_ac97_init);
251module_exit(txx9aclc_ac97_exit);
252
253MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
254MODULE_DESCRIPTION("TXx9 ACLC AC97 driver");
255MODULE_LICENSE("GPL");
diff --git a/sound/soc/txx9/txx9aclc-generic.c b/sound/soc/txx9/txx9aclc-generic.c
new file mode 100644
index 000000000000..3175de9a92cb
--- /dev/null
+++ b/sound/soc/txx9/txx9aclc-generic.c
@@ -0,0 +1,98 @@
1/*
2 * Generic TXx9 ACLC machine driver
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * Based on RBTX49xx patch from CELF patch archive.
7 * (C) Copyright TOSHIBA CORPORATION 2004-2006
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This is a very generic AC97 sound machine driver for boards which
14 * have (AC97) audio at ACLC (e.g. RBTX49XX boards).
15 */
16
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <sound/core.h>
20#include <sound/pcm.h>
21#include <sound/soc.h>
22#include "../codecs/ac97.h"
23#include "txx9aclc.h"
24
25static struct snd_soc_dai_link txx9aclc_generic_dai = {
26 .name = "AC97",
27 .stream_name = "AC97 HiFi",
28 .cpu_dai = &txx9aclc_ac97_dai,
29 .codec_dai = &ac97_dai,
30};
31
32static struct snd_soc_card txx9aclc_generic_card = {
33 .name = "Generic TXx9 ACLC Audio",
34 .platform = &txx9aclc_soc_platform,
35 .dai_link = &txx9aclc_generic_dai,
36 .num_links = 1,
37};
38
39static struct txx9aclc_soc_device txx9aclc_generic_soc_device = {
40 .soc_dev = {
41 .card = &txx9aclc_generic_card,
42 .codec_dev = &soc_codec_dev_ac97,
43 },
44};
45
46static int __init txx9aclc_generic_probe(struct platform_device *pdev)
47{
48 struct txx9aclc_soc_device *dev = &txx9aclc_generic_soc_device;
49 struct platform_device *soc_pdev;
50 int ret;
51
52 soc_pdev = platform_device_alloc("soc-audio", -1);
53 if (!soc_pdev)
54 return -ENOMEM;
55 platform_set_drvdata(soc_pdev, &dev->soc_dev);
56 dev->soc_dev.dev = &soc_pdev->dev;
57 ret = platform_device_add(soc_pdev);
58 if (ret) {
59 platform_device_put(soc_pdev);
60 return ret;
61 }
62 platform_set_drvdata(pdev, soc_pdev);
63 return 0;
64}
65
66static int __exit txx9aclc_generic_remove(struct platform_device *pdev)
67{
68 struct platform_device *soc_pdev = platform_get_drvdata(pdev);
69
70 platform_device_unregister(soc_pdev);
71 return 0;
72}
73
74static struct platform_driver txx9aclc_generic_driver = {
75 .remove = txx9aclc_generic_remove,
76 .driver = {
77 .name = "txx9aclc-generic",
78 .owner = THIS_MODULE,
79 },
80};
81
82static int __init txx9aclc_generic_init(void)
83{
84 return platform_driver_probe(&txx9aclc_generic_driver,
85 txx9aclc_generic_probe);
86}
87
88static void __exit txx9aclc_generic_exit(void)
89{
90 platform_driver_unregister(&txx9aclc_generic_driver);
91}
92
93module_init(txx9aclc_generic_init);
94module_exit(txx9aclc_generic_exit);
95
96MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
97MODULE_DESCRIPTION("Generic TXx9 ACLC ALSA SoC audio driver");
98MODULE_LICENSE("GPL");
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
new file mode 100644
index 000000000000..fa336616152e
--- /dev/null
+++ b/sound/soc/txx9/txx9aclc.c
@@ -0,0 +1,430 @@
1/*
2 * Generic TXx9 ACLC platform driver
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * Based on RBTX49xx patch from CELF patch archive.
7 * (C) Copyright TOSHIBA CORPORATION 2004-2006
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17#include <linux/scatterlist.h>
18#include <sound/core.h>
19#include <sound/pcm.h>
20#include <sound/pcm_params.h>
21#include <sound/soc.h>
22#include "txx9aclc.h"
23
24static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
25 /*
26 * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
27 * needs more works for noncoherent MIPS.
28 */
29 .info = SNDRV_PCM_INFO_INTERLEAVED |
30 SNDRV_PCM_INFO_BATCH |
31 SNDRV_PCM_INFO_PAUSE,
32#ifdef __BIG_ENDIAN
33 .formats = SNDRV_PCM_FMTBIT_S16_BE,
34#else
35 .formats = SNDRV_PCM_FMTBIT_S16_LE,
36#endif
37 .period_bytes_min = 1024,
38 .period_bytes_max = 8 * 1024,
39 .periods_min = 2,
40 .periods_max = 4096,
41 .buffer_bytes_max = 32 * 1024,
42};
43
44static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
45 struct snd_pcm_hw_params *params)
46{
47 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
48 struct snd_soc_device *socdev = rtd->socdev;
49 struct snd_pcm_runtime *runtime = substream->runtime;
50 struct txx9aclc_dmadata *dmadata = runtime->private_data;
51 int ret;
52
53 ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
54 if (ret < 0)
55 return ret;
56
57 dev_dbg(socdev->dev,
58 "runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd "
59 "runtime->min_align %ld\n",
60 (unsigned long)runtime->dma_area,
61 (unsigned long)runtime->dma_addr, runtime->dma_bytes,
62 runtime->min_align);
63 dev_dbg(socdev->dev,
64 "periods %d period_bytes %d stream %d\n",
65 params_periods(params), params_period_bytes(params),
66 substream->stream);
67
68 dmadata->substream = substream;
69 dmadata->pos = 0;
70 return 0;
71}
72
/* Release the DMA buffer allocated in hw_params. */
static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
77
78static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
79{
80 struct snd_pcm_runtime *runtime = substream->runtime;
81 struct txx9aclc_dmadata *dmadata = runtime->private_data;
82
83 dmadata->dma_addr = runtime->dma_addr;
84 dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
85 dmadata->period_bytes = snd_pcm_lib_period_bytes(substream);
86
87 if (dmadata->buffer_bytes == dmadata->period_bytes) {
88 dmadata->frag_bytes = dmadata->period_bytes >> 1;
89 dmadata->frags = 2;
90 } else {
91 dmadata->frag_bytes = dmadata->period_bytes;
92 dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes;
93 }
94 dmadata->frag_count = 0;
95 dmadata->pos = 0;
96 return 0;
97}
98
99static void txx9aclc_dma_complete(void *arg)
100{
101 struct txx9aclc_dmadata *dmadata = arg;
102 unsigned long flags;
103
104 /* dma completion handler cannot submit new operations */
105 spin_lock_irqsave(&dmadata->dma_lock, flags);
106 if (dmadata->frag_count >= 0) {
107 dmadata->dmacount--;
108 BUG_ON(dmadata->dmacount < 0);
109 tasklet_schedule(&dmadata->tasklet);
110 }
111 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
112}
113
114static struct dma_async_tx_descriptor *
115txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
116{
117 struct dma_chan *chan = dmadata->dma_chan;
118 struct dma_async_tx_descriptor *desc;
119 struct scatterlist sg;
120
121 sg_init_table(&sg, 1);
122 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
123 dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
124 sg_dma_address(&sg) = buf_dma_addr;
125 desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
126 dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
127 DMA_TO_DEVICE : DMA_FROM_DEVICE,
128 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
129 if (!desc) {
130 dev_err(&chan->dev->device, "cannot prepare slave dma\n");
131 return NULL;
132 }
133 desc->callback = txx9aclc_dma_complete;
134 desc->callback_param = dmadata;
135 desc->tx_submit(desc);
136 return desc;
137}
138
139#define NR_DMA_CHAIN 2
140
141static void txx9aclc_dma_tasklet(unsigned long data)
142{
143 struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
144 struct dma_chan *chan = dmadata->dma_chan;
145 struct dma_async_tx_descriptor *desc;
146 struct snd_pcm_substream *substream = dmadata->substream;
147 u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
148 ACCTL_AUDODMA : ACCTL_AUDIDMA;
149 int i;
150 unsigned long flags;
151
152 spin_lock_irqsave(&dmadata->dma_lock, flags);
153 if (dmadata->frag_count < 0) {
154 struct txx9aclc_soc_device *dev =
155 container_of(dmadata, struct txx9aclc_soc_device,
156 dmadata[substream->stream]);
157 struct txx9aclc_plat_drvdata *drvdata =
158 txx9aclc_get_plat_drvdata(dev);
159 void __iomem *base = drvdata->base;
160
161 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
162 chan->device->device_terminate_all(chan);
163 /* first time */
164 for (i = 0; i < NR_DMA_CHAIN; i++) {
165 desc = txx9aclc_dma_submit(dmadata,
166 dmadata->dma_addr + i * dmadata->frag_bytes);
167 if (!desc)
168 return;
169 }
170 dmadata->dmacount = NR_DMA_CHAIN;
171 chan->device->device_issue_pending(chan);
172 spin_lock_irqsave(&dmadata->dma_lock, flags);
173 __raw_writel(ctlbit, base + ACCTLEN);
174 dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
175 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
176 return;
177 }
178 BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN);
179 while (dmadata->dmacount < NR_DMA_CHAIN) {
180 dmadata->dmacount++;
181 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
182 desc = txx9aclc_dma_submit(dmadata,
183 dmadata->dma_addr +
184 dmadata->frag_count * dmadata->frag_bytes);
185 if (!desc)
186 return;
187 chan->device->device_issue_pending(chan);
188
189 spin_lock_irqsave(&dmadata->dma_lock, flags);
190 dmadata->frag_count++;
191 dmadata->frag_count %= dmadata->frags;
192 dmadata->pos += dmadata->frag_bytes;
193 dmadata->pos %= dmadata->buffer_bytes;
194 if ((dmadata->frag_count * dmadata->frag_bytes) %
195 dmadata->period_bytes == 0)
196 snd_pcm_period_elapsed(substream);
197 }
198 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
199}
200
201static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
202{
203 struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
204 struct snd_soc_pcm_runtime *rtd = substream->private_data;
205 struct txx9aclc_soc_device *dev =
206 container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
207 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
208 void __iomem *base = drvdata->base;
209 unsigned long flags;
210 int ret = 0;
211 u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
212 ACCTL_AUDODMA : ACCTL_AUDIDMA;
213
214 spin_lock_irqsave(&dmadata->dma_lock, flags);
215 switch (cmd) {
216 case SNDRV_PCM_TRIGGER_START:
217 dmadata->frag_count = -1;
218 tasklet_schedule(&dmadata->tasklet);
219 break;
220 case SNDRV_PCM_TRIGGER_STOP:
221 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
222 case SNDRV_PCM_TRIGGER_SUSPEND:
223 __raw_writel(ctlbit, base + ACCTLDIS);
224 break;
225 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
226 case SNDRV_PCM_TRIGGER_RESUME:
227 __raw_writel(ctlbit, base + ACCTLEN);
228 break;
229 default:
230 ret = -EINVAL;
231 }
232 spin_unlock_irqrestore(&dmadata->dma_lock, flags);
233 return ret;
234}
235
236static snd_pcm_uframes_t
237txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
238{
239 struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
240
241 return bytes_to_frames(substream->runtime, dmadata->pos);
242}
243
244static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
245{
246 struct snd_soc_pcm_runtime *rtd = substream->private_data;
247 struct txx9aclc_soc_device *dev =
248 container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev);
249 struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
250 int ret;
251
252 ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware);
253 if (ret)
254 return ret;
255 /* ensure that buffer size is a multiple of period size */
256 ret = snd_pcm_hw_constraint_integer(substream->runtime,
257 SNDRV_PCM_HW_PARAM_PERIODS);
258 if (ret < 0)
259 return ret;
260 substream->runtime->private_data = dmadata;
261 return 0;
262}
263
264static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
265{
266 struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
267 struct dma_chan *chan = dmadata->dma_chan;
268
269 dmadata->frag_count = -1;
270 chan->device->device_terminate_all(chan);
271 return 0;
272}
273
274static struct snd_pcm_ops txx9aclc_pcm_ops = {
275 .open = txx9aclc_pcm_open,
276 .close = txx9aclc_pcm_close,
277 .ioctl = snd_pcm_lib_ioctl,
278 .hw_params = txx9aclc_pcm_hw_params,
279 .hw_free = txx9aclc_pcm_hw_free,
280 .prepare = txx9aclc_pcm_prepare,
281 .trigger = txx9aclc_pcm_trigger,
282 .pointer = txx9aclc_pcm_pointer,
283};
284
/* Free the preallocated DMA buffers for all substreams. */
static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
289
290static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
291 struct snd_pcm *pcm)
292{
293 return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
294 card->dev, 64 * 1024, 4 * 1024 * 1024);
295}
296
297static bool filter(struct dma_chan *chan, void *param)
298{
299 struct txx9aclc_dmadata *dmadata = param;
300 char devname[BUS_ID_SIZE + 2];
301
302 sprintf(devname, "%s.%d", dmadata->dma_res->name,
303 (int)dmadata->dma_res->start);
304 if (strcmp(dev_name(chan->device->dev), devname) == 0) {
305 chan->private = &dmadata->dma_slave;
306 return true;
307 }
308 return false;
309}
310
311static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
312 struct txx9aclc_dmadata *dmadata)
313{
314 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
315 struct txx9dmac_slave *ds = &dmadata->dma_slave;
316 dma_cap_mask_t mask;
317
318 spin_lock_init(&dmadata->dma_lock);
319
320 ds->reg_width = sizeof(u32);
321 if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) {
322 ds->tx_reg = drvdata->physbase + ACAUDODAT;
323 ds->rx_reg = 0;
324 } else {
325 ds->tx_reg = 0;
326 ds->rx_reg = drvdata->physbase + ACAUDIDAT;
327 }
328
329 /* Try to grab a DMA channel */
330 dma_cap_zero(mask);
331 dma_cap_set(DMA_SLAVE, mask);
332 dmadata->dma_chan = dma_request_channel(mask, filter, dmadata);
333 if (!dmadata->dma_chan) {
334 dev_err(dev->soc_dev.dev,
335 "DMA channel for %s is not available\n",
336 dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ?
337 "playback" : "capture");
338 return -EBUSY;
339 }
340 tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
341 (unsigned long)dmadata);
342 return 0;
343}
344
345static int txx9aclc_pcm_probe(struct platform_device *pdev)
346{
347 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
348 struct txx9aclc_soc_device *dev =
349 container_of(socdev, struct txx9aclc_soc_device, soc_dev);
350 struct resource *r;
351 int i;
352 int ret;
353
354 dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK;
355 dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE;
356 for (i = 0; i < 2; i++) {
357 r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i);
358 if (!r) {
359 ret = -EBUSY;
360 goto exit;
361 }
362 dev->dmadata[i].dma_res = r;
363 ret = txx9aclc_dma_init(dev, &dev->dmadata[i]);
364 if (ret)
365 goto exit;
366 }
367 return 0;
368
369exit:
370 for (i = 0; i < 2; i++) {
371 if (dev->dmadata[i].dma_chan)
372 dma_release_channel(dev->dmadata[i].dma_chan);
373 dev->dmadata[i].dma_chan = NULL;
374 }
375 return ret;
376}
377
378static int txx9aclc_pcm_remove(struct platform_device *pdev)
379{
380 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
381 struct txx9aclc_soc_device *dev =
382 container_of(socdev, struct txx9aclc_soc_device, soc_dev);
383 struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev);
384 void __iomem *base = drvdata->base;
385 int i;
386
387 /* disable all FIFO DMAs */
388 __raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS);
389 /* dummy R/W to clear pending DMAREQ if any */
390 __raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT);
391
392 for (i = 0; i < 2; i++) {
393 struct txx9aclc_dmadata *dmadata = &dev->dmadata[i];
394 struct dma_chan *chan = dmadata->dma_chan;
395 if (chan) {
396 dmadata->frag_count = -1;
397 chan->device->device_terminate_all(chan);
398 dma_release_channel(chan);
399 }
400 dev->dmadata[i].dma_chan = NULL;
401 }
402 return 0;
403}
404
405struct snd_soc_platform txx9aclc_soc_platform = {
406 .name = "txx9aclc-audio",
407 .probe = txx9aclc_pcm_probe,
408 .remove = txx9aclc_pcm_remove,
409 .pcm_ops = &txx9aclc_pcm_ops,
410 .pcm_new = txx9aclc_pcm_new,
411 .pcm_free = txx9aclc_pcm_free_dma_buffers,
412};
413EXPORT_SYMBOL_GPL(txx9aclc_soc_platform);
414
415static int __init txx9aclc_soc_platform_init(void)
416{
417 return snd_soc_register_platform(&txx9aclc_soc_platform);
418}
419
420static void __exit txx9aclc_soc_platform_exit(void)
421{
422 snd_soc_unregister_platform(&txx9aclc_soc_platform);
423}
424
425module_init(txx9aclc_soc_platform_init);
426module_exit(txx9aclc_soc_platform_exit);
427
428MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
429MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver");
430MODULE_LICENSE("GPL");
diff --git a/sound/soc/txx9/txx9aclc.h b/sound/soc/txx9/txx9aclc.h
new file mode 100644
index 000000000000..6769aab41b33
--- /dev/null
+++ b/sound/soc/txx9/txx9aclc.h
@@ -0,0 +1,83 @@
1/*
2 * TXx9 SoC AC Link Controller
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __TXX9ACLC_H
10#define __TXX9ACLC_H
11
12#include <linux/interrupt.h>
13#include <asm/txx9/dmac.h>
14
15#define ACCTLEN 0x00 /* control enable */
16#define ACCTLDIS 0x04 /* control disable */
17#define ACCTL_ENLINK 0x00000001 /* enable/disable AC-link */
18#define ACCTL_AUDODMA 0x00000100 /* AUDODMA enable/disable */
19#define ACCTL_AUDIDMA 0x00001000 /* AUDIDMA enable/disable */
20#define ACCTL_AUDOEHLT 0x00010000 /* AUDO error halt
21 enable/disable */
22#define ACCTL_AUDIEHLT 0x00100000 /* AUDI error halt
23 enable/disable */
24#define ACREGACC 0x08 /* codec register access */
25#define ACREGACC_DAT_SHIFT 0 /* data field */
26#define ACREGACC_REG_SHIFT 16 /* address field */
27#define ACREGACC_CODECID_SHIFT 24 /* CODEC ID field */
28#define ACREGACC_READ 0x80000000 /* CODEC read */
29#define ACREGACC_WRITE 0x00000000 /* CODEC write */
30#define ACINTSTS 0x10 /* interrupt status */
31#define ACINTMSTS 0x14 /* interrupt masked status */
32#define ACINTEN 0x18 /* interrupt enable */
33#define ACINTDIS 0x1c /* interrupt disable */
34#define ACINT_CODECRDY(n) (0x00000001 << (n)) /* CODECn ready */
35#define ACINT_REGACCRDY 0x00000010 /* ACREGACC ready */
36#define ACINT_AUDOERR 0x00000100 /* AUDO underrun error */
37#define ACINT_AUDIERR 0x00001000 /* AUDI overrun error */
38#define ACDMASTS 0x80 /* DMA request status */
39#define ACDMA_AUDO 0x00000001 /* AUDODMA pending */
40#define ACDMA_AUDI 0x00000010 /* AUDIDMA pending */
41#define ACAUDODAT 0xa0 /* audio out data */
42#define ACAUDIDAT 0xb0 /* audio in data */
43#define ACREVID 0xfc /* revision ID */
44
45struct txx9aclc_dmadata {
46 struct resource *dma_res;
47 struct txx9dmac_slave dma_slave;
48 struct dma_chan *dma_chan;
49 struct tasklet_struct tasklet;
50 spinlock_t dma_lock;
51 int stream; /* SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE */
52 struct snd_pcm_substream *substream;
53 unsigned long pos;
54 dma_addr_t dma_addr;
55 unsigned long buffer_bytes;
56 unsigned long period_bytes;
57 unsigned long frag_bytes;
58 int frags;
59 int frag_count;
60 int dmacount;
61};
62
63struct txx9aclc_plat_drvdata {
64 void __iomem *base;
65 u64 physbase;
66};
67
68struct txx9aclc_soc_device {
69 struct snd_soc_device soc_dev;
70 struct platform_device *aclc_pdev; /* for ioresources, drvdata */
71 struct txx9aclc_dmadata dmadata[2];
72};
73
74static inline struct txx9aclc_plat_drvdata *txx9aclc_get_plat_drvdata(
75 struct txx9aclc_soc_device *sdev)
76{
77 return platform_get_drvdata(sdev->aclc_pdev);
78}
79
80extern struct snd_soc_platform txx9aclc_soc_platform;
81extern struct snd_soc_dai txx9aclc_ac97_dai;
82
83#endif /* __TXX9ACLC_H */
diff --git a/sound/synth/Makefile b/sound/synth/Makefile
index e99fd76caa17..11eb06ac2eca 100644
--- a/sound/synth/Makefile
+++ b/sound/synth/Makefile
@@ -5,16 +5,8 @@
5 5
6snd-util-mem-objs := util_mem.o 6snd-util-mem-objs := util_mem.o
7 7
8#
9# this function returns:
10# "m" - CONFIG_SND_SEQUENCER is m
11# <empty string> - CONFIG_SND_SEQUENCER is undefined
12# otherwise parameter #1 value
13#
14sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
15
16# Toplevel Module Dependency 8# Toplevel Module Dependency
17obj-$(CONFIG_SND_EMU10K1) += snd-util-mem.o 9obj-$(CONFIG_SND_EMU10K1) += snd-util-mem.o
18obj-$(CONFIG_SND_TRIDENT) += snd-util-mem.o 10obj-$(CONFIG_SND_TRIDENT) += snd-util-mem.o
19obj-$(call sequencer,$(CONFIG_SND_SBAWE)) += snd-util-mem.o 11obj-$(CONFIG_SND_SBAWE_SEQ) += snd-util-mem.o
20obj-$(call sequencer,$(CONFIG_SND)) += emux/ 12obj-$(CONFIG_SND_SEQUENCER) += emux/
diff --git a/sound/synth/emux/Makefile b/sound/synth/emux/Makefile
index b69035240cf6..328594e6152d 100644
--- a/sound/synth/emux/Makefile
+++ b/sound/synth/emux/Makefile
@@ -7,14 +7,6 @@ snd-emux-synth-objs := emux.o emux_synth.o emux_seq.o emux_nrpn.o \
7 emux_effect.o emux_proc.o emux_hwdep.o soundfont.o \ 7 emux_effect.o emux_proc.o emux_hwdep.o soundfont.o \
8 $(if $(CONFIG_SND_SEQUENCER_OSS),emux_oss.o) 8 $(if $(CONFIG_SND_SEQUENCER_OSS),emux_oss.o)
9 9
10#
11# this function returns:
12# "m" - CONFIG_SND_SEQUENCER is m
13# <empty string> - CONFIG_SND_SEQUENCER is undefined
14# otherwise parameter #1 value
15#
16sequencer = $(if $(subst y,,$(CONFIG_SND_SEQUENCER)),$(if $(1),m),$(if $(CONFIG_SND_SEQUENCER),$(1)))
17
18# Toplevel Module Dependencies 10# Toplevel Module Dependencies
19obj-$(call sequencer,$(CONFIG_SND_SBAWE)) += snd-emux-synth.o 11obj-$(CONFIG_SND_SBAWE_SEQ) += snd-emux-synth.o
20obj-$(call sequencer,$(CONFIG_SND_EMU10K1)) += snd-emux-synth.o 12obj-$(CONFIG_SND_EMU10K1_SEQ) += snd-emux-synth.o
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index b13ce767ac72..b14451342166 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -42,10 +42,10 @@
42 (stream << 1) | (~(i / (dev->n_streams * BYTES_PER_SAMPLE_USB)) & 1) 42 (stream << 1) | (~(i / (dev->n_streams * BYTES_PER_SAMPLE_USB)) & 1)
43 43
44static struct snd_pcm_hardware snd_usb_caiaq_pcm_hardware = { 44static struct snd_pcm_hardware snd_usb_caiaq_pcm_hardware = {
45 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 45 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
46 SNDRV_PCM_INFO_BLOCK_TRANSFER), 46 SNDRV_PCM_INFO_BLOCK_TRANSFER),
47 .formats = SNDRV_PCM_FMTBIT_S24_3BE, 47 .formats = SNDRV_PCM_FMTBIT_S24_3BE,
48 .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | 48 .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
49 SNDRV_PCM_RATE_96000), 49 SNDRV_PCM_RATE_96000),
50 .rate_min = 44100, 50 .rate_min = 44100,
51 .rate_max = 0, /* will overwrite later */ 51 .rate_max = 0, /* will overwrite later */
@@ -68,7 +68,7 @@ activate_substream(struct snd_usb_caiaqdev *dev,
68 dev->sub_capture[sub->number] = sub; 68 dev->sub_capture[sub->number] = sub;
69} 69}
70 70
71static void 71static void
72deactivate_substream(struct snd_usb_caiaqdev *dev, 72deactivate_substream(struct snd_usb_caiaqdev *dev,
73 struct snd_pcm_substream *sub) 73 struct snd_pcm_substream *sub)
74{ 74{
@@ -118,7 +118,7 @@ static int stream_start(struct snd_usb_caiaqdev *dev)
118 return -EPIPE; 118 return -EPIPE;
119 } 119 }
120 } 120 }
121 121
122 return 0; 122 return 0;
123} 123}
124 124
@@ -129,7 +129,7 @@ static void stream_stop(struct snd_usb_caiaqdev *dev)
129 debug("%s(%p)\n", __func__, dev); 129 debug("%s(%p)\n", __func__, dev);
130 if (!dev->streaming) 130 if (!dev->streaming)
131 return; 131 return;
132 132
133 dev->streaming = 0; 133 dev->streaming = 0;
134 134
135 for (i = 0; i < N_URBS; i++) { 135 for (i = 0; i < N_URBS; i++) {
@@ -154,7 +154,7 @@ static int snd_usb_caiaq_substream_close(struct snd_pcm_substream *substream)
154 debug("%s(%p)\n", __func__, substream); 154 debug("%s(%p)\n", __func__, substream);
155 if (all_substreams_zero(dev->sub_playback) && 155 if (all_substreams_zero(dev->sub_playback) &&
156 all_substreams_zero(dev->sub_capture)) { 156 all_substreams_zero(dev->sub_capture)) {
157 /* when the last client has stopped streaming, 157 /* when the last client has stopped streaming,
158 * all sample rates are allowed again */ 158 * all sample rates are allowed again */
159 stream_stop(dev); 159 stream_stop(dev);
160 dev->pcm_info.rates = dev->samplerates; 160 dev->pcm_info.rates = dev->samplerates;
@@ -194,7 +194,7 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
194 struct snd_pcm_runtime *runtime = substream->runtime; 194 struct snd_pcm_runtime *runtime = substream->runtime;
195 195
196 debug("%s(%p)\n", __func__, substream); 196 debug("%s(%p)\n", __func__, substream);
197 197
198 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 198 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
199 dev->period_out_count[index] = BYTES_PER_SAMPLE + 1; 199 dev->period_out_count[index] = BYTES_PER_SAMPLE + 1;
200 dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1; 200 dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1;
@@ -205,19 +205,19 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
205 205
206 if (dev->streaming) 206 if (dev->streaming)
207 return 0; 207 return 0;
208 208
209 /* the first client that opens a stream defines the sample rate 209 /* the first client that opens a stream defines the sample rate
210 * setting for all subsequent calls, until the last client closed. */ 210 * setting for all subsequent calls, until the last client closed. */
211 for (i=0; i < ARRAY_SIZE(rates); i++) 211 for (i=0; i < ARRAY_SIZE(rates); i++)
212 if (runtime->rate == rates[i]) 212 if (runtime->rate == rates[i])
213 dev->pcm_info.rates = 1 << i; 213 dev->pcm_info.rates = 1 << i;
214 214
215 snd_pcm_limit_hw_rates(runtime); 215 snd_pcm_limit_hw_rates(runtime);
216 216
217 bytes_per_sample = BYTES_PER_SAMPLE; 217 bytes_per_sample = BYTES_PER_SAMPLE;
218 if (dev->spec.data_alignment == 2) 218 if (dev->spec.data_alignment == 2)
219 bytes_per_sample++; 219 bytes_per_sample++;
220 220
221 bpp = ((runtime->rate / 8000) + CLOCK_DRIFT_TOLERANCE) 221 bpp = ((runtime->rate / 8000) + CLOCK_DRIFT_TOLERANCE)
222 * bytes_per_sample * CHANNELS_PER_STREAM * dev->n_streams; 222 * bytes_per_sample * CHANNELS_PER_STREAM * dev->n_streams;
223 223
@@ -232,7 +232,7 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
232 ret = stream_start(dev); 232 ret = stream_start(dev);
233 if (ret) 233 if (ret)
234 return ret; 234 return ret;
235 235
236 dev->output_running = 0; 236 dev->output_running = 0;
237 wait_event_timeout(dev->prepare_wait_queue, dev->output_running, HZ); 237 wait_event_timeout(dev->prepare_wait_queue, dev->output_running, HZ);
238 if (!dev->output_running) { 238 if (!dev->output_running) {
@@ -273,7 +273,7 @@ snd_usb_caiaq_pcm_pointer(struct snd_pcm_substream *sub)
273 return SNDRV_PCM_POS_XRUN; 273 return SNDRV_PCM_POS_XRUN;
274 274
275 if (sub->stream == SNDRV_PCM_STREAM_PLAYBACK) 275 if (sub->stream == SNDRV_PCM_STREAM_PLAYBACK)
276 return bytes_to_frames(sub->runtime, 276 return bytes_to_frames(sub->runtime,
277 dev->audio_out_buf_pos[index]); 277 dev->audio_out_buf_pos[index]);
278 else 278 else
279 return bytes_to_frames(sub->runtime, 279 return bytes_to_frames(sub->runtime,
@@ -291,7 +291,7 @@ static struct snd_pcm_ops snd_usb_caiaq_ops = {
291 .trigger = snd_usb_caiaq_pcm_trigger, 291 .trigger = snd_usb_caiaq_pcm_trigger,
292 .pointer = snd_usb_caiaq_pcm_pointer 292 .pointer = snd_usb_caiaq_pcm_pointer
293}; 293};
294 294
295static void check_for_elapsed_periods(struct snd_usb_caiaqdev *dev, 295static void check_for_elapsed_periods(struct snd_usb_caiaqdev *dev,
296 struct snd_pcm_substream **subs) 296 struct snd_pcm_substream **subs)
297{ 297{
@@ -333,7 +333,7 @@ static void read_in_urb_mode0(struct snd_usb_caiaqdev *dev,
333 struct snd_pcm_runtime *rt = sub->runtime; 333 struct snd_pcm_runtime *rt = sub->runtime;
334 char *audio_buf = rt->dma_area; 334 char *audio_buf = rt->dma_area;
335 int sz = frames_to_bytes(rt, rt->buffer_size); 335 int sz = frames_to_bytes(rt, rt->buffer_size);
336 audio_buf[dev->audio_in_buf_pos[stream]++] 336 audio_buf[dev->audio_in_buf_pos[stream]++]
337 = usb_buf[i]; 337 = usb_buf[i];
338 dev->period_in_count[stream]++; 338 dev->period_in_count[stream]++;
339 if (dev->audio_in_buf_pos[stream] == sz) 339 if (dev->audio_in_buf_pos[stream] == sz)
@@ -354,14 +354,14 @@ static void read_in_urb_mode2(struct snd_usb_caiaqdev *dev,
354 354
355 for (i = 0; i < iso->actual_length;) { 355 for (i = 0; i < iso->actual_length;) {
356 if (i % (dev->n_streams * BYTES_PER_SAMPLE_USB) == 0) { 356 if (i % (dev->n_streams * BYTES_PER_SAMPLE_USB) == 0) {
357 for (stream = 0; 357 for (stream = 0;
358 stream < dev->n_streams; 358 stream < dev->n_streams;
359 stream++, i++) { 359 stream++, i++) {
360 if (dev->first_packet) 360 if (dev->first_packet)
361 continue; 361 continue;
362 362
363 check_byte = MAKE_CHECKBYTE(dev, stream, i); 363 check_byte = MAKE_CHECKBYTE(dev, stream, i);
364 364
365 if ((usb_buf[i] & 0x3f) != check_byte) 365 if ((usb_buf[i] & 0x3f) != check_byte)
366 dev->input_panic = 1; 366 dev->input_panic = 1;
367 367
@@ -410,21 +410,21 @@ static void read_in_urb(struct snd_usb_caiaqdev *dev,
410 } 410 }
411 411
412 if ((dev->input_panic || dev->output_panic) && !dev->warned) { 412 if ((dev->input_panic || dev->output_panic) && !dev->warned) {
413 debug("streaming error detected %s %s\n", 413 debug("streaming error detected %s %s\n",
414 dev->input_panic ? "(input)" : "", 414 dev->input_panic ? "(input)" : "",
415 dev->output_panic ? "(output)" : ""); 415 dev->output_panic ? "(output)" : "");
416 dev->warned = 1; 416 dev->warned = 1;
417 } 417 }
418} 418}
419 419
420static void fill_out_urb(struct snd_usb_caiaqdev *dev, 420static void fill_out_urb(struct snd_usb_caiaqdev *dev,
421 struct urb *urb, 421 struct urb *urb,
422 const struct usb_iso_packet_descriptor *iso) 422 const struct usb_iso_packet_descriptor *iso)
423{ 423{
424 unsigned char *usb_buf = urb->transfer_buffer + iso->offset; 424 unsigned char *usb_buf = urb->transfer_buffer + iso->offset;
425 struct snd_pcm_substream *sub; 425 struct snd_pcm_substream *sub;
426 int stream, i; 426 int stream, i;
427 427
428 for (i = 0; i < iso->length;) { 428 for (i = 0; i < iso->length;) {
429 for (stream = 0; stream < dev->n_streams; stream++, i++) { 429 for (stream = 0; stream < dev->n_streams; stream++, i++) {
430 sub = dev->sub_playback[stream]; 430 sub = dev->sub_playback[stream];
@@ -444,7 +444,7 @@ static void fill_out_urb(struct snd_usb_caiaqdev *dev,
444 444
445 /* fill in the check bytes */ 445 /* fill in the check bytes */
446 if (dev->spec.data_alignment == 2 && 446 if (dev->spec.data_alignment == 2 &&
447 i % (dev->n_streams * BYTES_PER_SAMPLE_USB) == 447 i % (dev->n_streams * BYTES_PER_SAMPLE_USB) ==
448 (dev->n_streams * CHANNELS_PER_STREAM)) 448 (dev->n_streams * CHANNELS_PER_STREAM))
449 for (stream = 0; stream < dev->n_streams; stream++, i++) 449 for (stream = 0; stream < dev->n_streams; stream++, i++)
450 usb_buf[i] = MAKE_CHECKBYTE(dev, stream, i); 450 usb_buf[i] = MAKE_CHECKBYTE(dev, stream, i);
@@ -453,7 +453,7 @@ static void fill_out_urb(struct snd_usb_caiaqdev *dev,
453 453
454static void read_completed(struct urb *urb) 454static void read_completed(struct urb *urb)
455{ 455{
456 struct snd_usb_caiaq_cb_info *info = urb->context; 456 struct snd_usb_caiaq_cb_info *info = urb->context;
457 struct snd_usb_caiaqdev *dev; 457 struct snd_usb_caiaqdev *dev;
458 struct urb *out; 458 struct urb *out;
459 int frame, len, send_it = 0, outframe = 0; 459 int frame, len, send_it = 0, outframe = 0;
@@ -478,7 +478,7 @@ static void read_completed(struct urb *urb)
478 out->iso_frame_desc[outframe].length = len; 478 out->iso_frame_desc[outframe].length = len;
479 out->iso_frame_desc[outframe].actual_length = 0; 479 out->iso_frame_desc[outframe].actual_length = 0;
480 out->iso_frame_desc[outframe].offset = BYTES_PER_FRAME * frame; 480 out->iso_frame_desc[outframe].offset = BYTES_PER_FRAME * frame;
481 481
482 if (len > 0) { 482 if (len > 0) {
483 spin_lock(&dev->spinlock); 483 spin_lock(&dev->spinlock);
484 fill_out_urb(dev, out, &out->iso_frame_desc[outframe]); 484 fill_out_urb(dev, out, &out->iso_frame_desc[outframe]);
@@ -497,14 +497,14 @@ static void read_completed(struct urb *urb)
497 out->transfer_flags = URB_ISO_ASAP; 497 out->transfer_flags = URB_ISO_ASAP;
498 usb_submit_urb(out, GFP_ATOMIC); 498 usb_submit_urb(out, GFP_ATOMIC);
499 } 499 }
500 500
501 /* re-submit inbound urb */ 501 /* re-submit inbound urb */
502 for (frame = 0; frame < FRAMES_PER_URB; frame++) { 502 for (frame = 0; frame < FRAMES_PER_URB; frame++) {
503 urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame; 503 urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame;
504 urb->iso_frame_desc[frame].length = BYTES_PER_FRAME; 504 urb->iso_frame_desc[frame].length = BYTES_PER_FRAME;
505 urb->iso_frame_desc[frame].actual_length = 0; 505 urb->iso_frame_desc[frame].actual_length = 0;
506 } 506 }
507 507
508 urb->number_of_packets = FRAMES_PER_URB; 508 urb->number_of_packets = FRAMES_PER_URB;
509 urb->transfer_flags = URB_ISO_ASAP; 509 urb->transfer_flags = URB_ISO_ASAP;
510 usb_submit_urb(urb, GFP_ATOMIC); 510 usb_submit_urb(urb, GFP_ATOMIC);
@@ -528,7 +528,7 @@ static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret)
528 struct usb_device *usb_dev = dev->chip.dev; 528 struct usb_device *usb_dev = dev->chip.dev;
529 unsigned int pipe; 529 unsigned int pipe;
530 530
531 pipe = (dir == SNDRV_PCM_STREAM_PLAYBACK) ? 531 pipe = (dir == SNDRV_PCM_STREAM_PLAYBACK) ?
532 usb_sndisocpipe(usb_dev, ENDPOINT_PLAYBACK) : 532 usb_sndisocpipe(usb_dev, ENDPOINT_PLAYBACK) :
533 usb_rcvisocpipe(usb_dev, ENDPOINT_CAPTURE); 533 usb_rcvisocpipe(usb_dev, ENDPOINT_CAPTURE);
534 534
@@ -547,25 +547,25 @@ static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret)
547 return urbs; 547 return urbs;
548 } 548 }
549 549
550 urbs[i]->transfer_buffer = 550 urbs[i]->transfer_buffer =
551 kmalloc(FRAMES_PER_URB * BYTES_PER_FRAME, GFP_KERNEL); 551 kmalloc(FRAMES_PER_URB * BYTES_PER_FRAME, GFP_KERNEL);
552 if (!urbs[i]->transfer_buffer) { 552 if (!urbs[i]->transfer_buffer) {
553 log("unable to kmalloc() transfer buffer, OOM!?\n"); 553 log("unable to kmalloc() transfer buffer, OOM!?\n");
554 *ret = -ENOMEM; 554 *ret = -ENOMEM;
555 return urbs; 555 return urbs;
556 } 556 }
557 557
558 for (frame = 0; frame < FRAMES_PER_URB; frame++) { 558 for (frame = 0; frame < FRAMES_PER_URB; frame++) {
559 struct usb_iso_packet_descriptor *iso = 559 struct usb_iso_packet_descriptor *iso =
560 &urbs[i]->iso_frame_desc[frame]; 560 &urbs[i]->iso_frame_desc[frame];
561 561
562 iso->offset = BYTES_PER_FRAME * frame; 562 iso->offset = BYTES_PER_FRAME * frame;
563 iso->length = BYTES_PER_FRAME; 563 iso->length = BYTES_PER_FRAME;
564 } 564 }
565 565
566 urbs[i]->dev = usb_dev; 566 urbs[i]->dev = usb_dev;
567 urbs[i]->pipe = pipe; 567 urbs[i]->pipe = pipe;
568 urbs[i]->transfer_buffer_length = FRAMES_PER_URB 568 urbs[i]->transfer_buffer_length = FRAMES_PER_URB
569 * BYTES_PER_FRAME; 569 * BYTES_PER_FRAME;
570 urbs[i]->context = &dev->data_cb_info[i]; 570 urbs[i]->context = &dev->data_cb_info[i];
571 urbs[i]->interval = 1; 571 urbs[i]->interval = 1;
@@ -589,7 +589,7 @@ static void free_urbs(struct urb **urbs)
589 for (i = 0; i < N_URBS; i++) { 589 for (i = 0; i < N_URBS; i++) {
590 if (!urbs[i]) 590 if (!urbs[i])
591 continue; 591 continue;
592 592
593 usb_kill_urb(urbs[i]); 593 usb_kill_urb(urbs[i]);
594 kfree(urbs[i]->transfer_buffer); 594 kfree(urbs[i]->transfer_buffer);
595 usb_free_urb(urbs[i]); 595 usb_free_urb(urbs[i]);
@@ -602,11 +602,11 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
602{ 602{
603 int i, ret; 603 int i, ret;
604 604
605 dev->n_audio_in = max(dev->spec.num_analog_audio_in, 605 dev->n_audio_in = max(dev->spec.num_analog_audio_in,
606 dev->spec.num_digital_audio_in) / 606 dev->spec.num_digital_audio_in) /
607 CHANNELS_PER_STREAM; 607 CHANNELS_PER_STREAM;
608 dev->n_audio_out = max(dev->spec.num_analog_audio_out, 608 dev->n_audio_out = max(dev->spec.num_analog_audio_out,
609 dev->spec.num_digital_audio_out) / 609 dev->spec.num_digital_audio_out) /
610 CHANNELS_PER_STREAM; 610 CHANNELS_PER_STREAM;
611 dev->n_streams = max(dev->n_audio_in, dev->n_audio_out); 611 dev->n_streams = max(dev->n_audio_in, dev->n_audio_out);
612 612
@@ -619,7 +619,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
619 return -EINVAL; 619 return -EINVAL;
620 } 620 }
621 621
622 ret = snd_pcm_new(dev->chip.card, dev->product_name, 0, 622 ret = snd_pcm_new(dev->chip.card, dev->product_name, 0,
623 dev->n_audio_out, dev->n_audio_in, &dev->pcm); 623 dev->n_audio_out, dev->n_audio_in, &dev->pcm);
624 624
625 if (ret < 0) { 625 if (ret < 0) {
@@ -632,7 +632,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
632 632
633 memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); 633 memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
634 memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); 634 memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
635 635
636 memcpy(&dev->pcm_info, &snd_usb_caiaq_pcm_hardware, 636 memcpy(&dev->pcm_info, &snd_usb_caiaq_pcm_hardware,
637 sizeof(snd_usb_caiaq_pcm_hardware)); 637 sizeof(snd_usb_caiaq_pcm_hardware));
638 638
@@ -651,9 +651,9 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
651 break; 651 break;
652 } 652 }
653 653
654 snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_PLAYBACK, 654 snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_PLAYBACK,
655 &snd_usb_caiaq_ops); 655 &snd_usb_caiaq_ops);
656 snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_CAPTURE, 656 snd_pcm_set_ops(dev->pcm, SNDRV_PCM_STREAM_CAPTURE,
657 &snd_usb_caiaq_ops); 657 &snd_usb_caiaq_ops);
658 658
659 snd_pcm_lib_preallocate_pages_for_all(dev->pcm, 659 snd_pcm_lib_preallocate_pages_for_all(dev->pcm,
@@ -662,7 +662,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
662 MAX_BUFFER_SIZE, MAX_BUFFER_SIZE); 662 MAX_BUFFER_SIZE, MAX_BUFFER_SIZE);
663 663
664 dev->data_cb_info = 664 dev->data_cb_info =
665 kmalloc(sizeof(struct snd_usb_caiaq_cb_info) * N_URBS, 665 kmalloc(sizeof(struct snd_usb_caiaq_cb_info) * N_URBS,
666 GFP_KERNEL); 666 GFP_KERNEL);
667 667
668 if (!dev->data_cb_info) 668 if (!dev->data_cb_info)
@@ -672,14 +672,14 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
672 dev->data_cb_info[i].dev = dev; 672 dev->data_cb_info[i].dev = dev;
673 dev->data_cb_info[i].index = i; 673 dev->data_cb_info[i].index = i;
674 } 674 }
675 675
676 dev->data_urbs_in = alloc_urbs(dev, SNDRV_PCM_STREAM_CAPTURE, &ret); 676 dev->data_urbs_in = alloc_urbs(dev, SNDRV_PCM_STREAM_CAPTURE, &ret);
677 if (ret < 0) { 677 if (ret < 0) {
678 kfree(dev->data_cb_info); 678 kfree(dev->data_cb_info);
679 free_urbs(dev->data_urbs_in); 679 free_urbs(dev->data_urbs_in);
680 return ret; 680 return ret;
681 } 681 }
682 682
683 dev->data_urbs_out = alloc_urbs(dev, SNDRV_PCM_STREAM_PLAYBACK, &ret); 683 dev->data_urbs_out = alloc_urbs(dev, SNDRV_PCM_STREAM_PLAYBACK, &ret);
684 if (ret < 0) { 684 if (ret < 0) {
685 kfree(dev->data_cb_info); 685 kfree(dev->data_cb_info);
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 515de1cd2a3e..22406245a98b 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -35,7 +35,7 @@
35#include "input.h" 35#include "input.h"
36 36
37MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 37MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
38MODULE_DESCRIPTION("caiaq USB audio, version 1.3.14"); 38MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," 40MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
41 "{Native Instruments, RigKontrol3}," 41 "{Native Instruments, RigKontrol3},"
@@ -79,7 +79,7 @@ static struct usb_device_id snd_usb_id_table[] = {
79 { 79 {
80 .match_flags = USB_DEVICE_ID_MATCH_DEVICE, 80 .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
81 .idVendor = USB_VID_NATIVEINSTRUMENTS, 81 .idVendor = USB_VID_NATIVEINSTRUMENTS,
82 .idProduct = USB_PID_RIGKONTROL2 82 .idProduct = USB_PID_RIGKONTROL2
83 }, 83 },
84 { 84 {
85 .match_flags = USB_DEVICE_ID_MATCH_DEVICE, 85 .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
@@ -197,7 +197,7 @@ int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *dev,
197 197
198 if (buffer && len > 0) 198 if (buffer && len > 0)
199 memcpy(dev->ep1_out_buf+1, buffer, len); 199 memcpy(dev->ep1_out_buf+1, buffer, len);
200 200
201 dev->ep1_out_buf[0] = command; 201 dev->ep1_out_buf[0] = command;
202 return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1), 202 return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1),
203 dev->ep1_out_buf, len+1, &actual_len, 200); 203 dev->ep1_out_buf, len+1, &actual_len, 200);
@@ -208,7 +208,7 @@ int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *dev,
208{ 208{
209 int ret; 209 int ret;
210 char tmp[5]; 210 char tmp[5];
211 211
212 switch (rate) { 212 switch (rate) {
213 case 44100: tmp[0] = SAMPLERATE_44100; break; 213 case 44100: tmp[0] = SAMPLERATE_44100; break;
214 case 48000: tmp[0] = SAMPLERATE_48000; break; 214 case 48000: tmp[0] = SAMPLERATE_48000; break;
@@ -237,12 +237,12 @@ int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *dev,
237 237
238 if (ret) 238 if (ret)
239 return ret; 239 return ret;
240 240
241 if (!wait_event_timeout(dev->ep1_wait_queue, 241 if (!wait_event_timeout(dev->ep1_wait_queue,
242 dev->audio_parm_answer >= 0, HZ)) 242 dev->audio_parm_answer >= 0, HZ))
243 return -EPIPE; 243 return -EPIPE;
244 244
245 if (dev->audio_parm_answer != 1) 245 if (dev->audio_parm_answer != 1)
246 debug("unable to set the device's audio params\n"); 246 debug("unable to set the device's audio params\n");
247 else 247 else
248 dev->bpp = bpp; 248 dev->bpp = bpp;
@@ -250,8 +250,8 @@ int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *dev,
250 return dev->audio_parm_answer == 1 ? 0 : -EINVAL; 250 return dev->audio_parm_answer == 1 ? 0 : -EINVAL;
251} 251}
252 252
253int snd_usb_caiaq_set_auto_msg (struct snd_usb_caiaqdev *dev, 253int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *dev,
254 int digital, int analog, int erp) 254 int digital, int analog, int erp)
255{ 255{
256 char tmp[3] = { digital, analog, erp }; 256 char tmp[3] = { digital, analog, erp };
257 return snd_usb_caiaq_send_command(dev, EP1_CMD_AUTO_MSG, 257 return snd_usb_caiaq_send_command(dev, EP1_CMD_AUTO_MSG,
@@ -262,7 +262,7 @@ static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
262{ 262{
263 int ret; 263 int ret;
264 char val[4]; 264 char val[4];
265 265
266 /* device-specific startup specials */ 266 /* device-specific startup specials */
267 switch (dev->chip.usb_id) { 267 switch (dev->chip.usb_id) {
268 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2): 268 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2):
@@ -314,7 +314,7 @@ static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
314 dev->control_state, 1); 314 dev->control_state, 1);
315 break; 315 break;
316 } 316 }
317 317
318 if (dev->spec.num_analog_audio_out + 318 if (dev->spec.num_analog_audio_out +
319 dev->spec.num_analog_audio_in + 319 dev->spec.num_analog_audio_in +
320 dev->spec.num_digital_audio_out + 320 dev->spec.num_digital_audio_out +
@@ -323,7 +323,7 @@ static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
323 if (ret < 0) 323 if (ret < 0)
324 log("Unable to set up audio system (ret=%d)\n", ret); 324 log("Unable to set up audio system (ret=%d)\n", ret);
325 } 325 }
326 326
327 if (dev->spec.num_midi_in + 327 if (dev->spec.num_midi_in +
328 dev->spec.num_midi_out > 0) { 328 dev->spec.num_midi_out > 0) {
329 ret = snd_usb_caiaq_midi_init(dev); 329 ret = snd_usb_caiaq_midi_init(dev);
@@ -363,7 +363,7 @@ static int create_card(struct usb_device* usb_dev, struct snd_card **cardp)
363 if (devnum >= SNDRV_CARDS) 363 if (devnum >= SNDRV_CARDS)
364 return -ENODEV; 364 return -ENODEV;
365 365
366 err = snd_card_create(index[devnum], id[devnum], THIS_MODULE, 366 err = snd_card_create(index[devnum], id[devnum], THIS_MODULE,
367 sizeof(struct snd_usb_caiaqdev), &card); 367 sizeof(struct snd_usb_caiaqdev), &card);
368 if (err < 0) 368 if (err < 0)
369 return err; 369 return err;
@@ -382,11 +382,11 @@ static int create_card(struct usb_device* usb_dev, struct snd_card **cardp)
382 382
383static int __devinit init_card(struct snd_usb_caiaqdev *dev) 383static int __devinit init_card(struct snd_usb_caiaqdev *dev)
384{ 384{
385 char *c; 385 char *c, usbpath[32];
386 struct usb_device *usb_dev = dev->chip.dev; 386 struct usb_device *usb_dev = dev->chip.dev;
387 struct snd_card *card = dev->chip.card; 387 struct snd_card *card = dev->chip.card;
388 int err, len; 388 int err, len;
389 389
390 if (usb_set_interface(usb_dev, 0, 1) != 0) { 390 if (usb_set_interface(usb_dev, 0, 1) != 0) {
391 log("can't set alt interface.\n"); 391 log("can't set alt interface.\n");
392 return -EIO; 392 return -EIO;
@@ -395,19 +395,19 @@ static int __devinit init_card(struct snd_usb_caiaqdev *dev)
395 usb_init_urb(&dev->ep1_in_urb); 395 usb_init_urb(&dev->ep1_in_urb);
396 usb_init_urb(&dev->midi_out_urb); 396 usb_init_urb(&dev->midi_out_urb);
397 397
398 usb_fill_bulk_urb(&dev->ep1_in_urb, usb_dev, 398 usb_fill_bulk_urb(&dev->ep1_in_urb, usb_dev,
399 usb_rcvbulkpipe(usb_dev, 0x1), 399 usb_rcvbulkpipe(usb_dev, 0x1),
400 dev->ep1_in_buf, EP1_BUFSIZE, 400 dev->ep1_in_buf, EP1_BUFSIZE,
401 usb_ep1_command_reply_dispatch, dev); 401 usb_ep1_command_reply_dispatch, dev);
402 402
403 usb_fill_bulk_urb(&dev->midi_out_urb, usb_dev, 403 usb_fill_bulk_urb(&dev->midi_out_urb, usb_dev,
404 usb_sndbulkpipe(usb_dev, 0x1), 404 usb_sndbulkpipe(usb_dev, 0x1),
405 dev->midi_out_buf, EP1_BUFSIZE, 405 dev->midi_out_buf, EP1_BUFSIZE,
406 snd_usb_caiaq_midi_output_done, dev); 406 snd_usb_caiaq_midi_output_done, dev);
407 407
408 init_waitqueue_head(&dev->ep1_wait_queue); 408 init_waitqueue_head(&dev->ep1_wait_queue);
409 init_waitqueue_head(&dev->prepare_wait_queue); 409 init_waitqueue_head(&dev->prepare_wait_queue);
410 410
411 if (usb_submit_urb(&dev->ep1_in_urb, GFP_KERNEL) != 0) 411 if (usb_submit_urb(&dev->ep1_in_urb, GFP_KERNEL) != 0)
412 return -EIO; 412 return -EIO;
413 413
@@ -420,47 +420,52 @@ static int __devinit init_card(struct snd_usb_caiaqdev *dev)
420 420
421 usb_string(usb_dev, usb_dev->descriptor.iManufacturer, 421 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
422 dev->vendor_name, CAIAQ_USB_STR_LEN); 422 dev->vendor_name, CAIAQ_USB_STR_LEN);
423 423
424 usb_string(usb_dev, usb_dev->descriptor.iProduct, 424 usb_string(usb_dev, usb_dev->descriptor.iProduct,
425 dev->product_name, CAIAQ_USB_STR_LEN); 425 dev->product_name, CAIAQ_USB_STR_LEN);
426 426
427 usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, 427 strlcpy(card->driver, MODNAME, sizeof(card->driver));
428 dev->serial, CAIAQ_USB_STR_LEN); 428 strlcpy(card->shortname, dev->product_name, sizeof(card->shortname));
429 429 strlcpy(card->mixername, dev->product_name, sizeof(card->mixername));
430 /* terminate serial string at first white space occurence */ 430
431 c = strchr(dev->serial, ' '); 431 /* if the id was not passed as module option, fill it with a shortened
432 if (c) 432 * version of the product string which does not contain any
433 *c = '\0'; 433 * whitespaces */
434 434
435 strcpy(card->driver, MODNAME); 435 if (*card->id == '\0') {
436 strcpy(card->shortname, dev->product_name); 436 char id[sizeof(card->id)];
437 437
438 len = snprintf(card->longname, sizeof(card->longname), 438 memset(id, 0, sizeof(id));
439 "%s %s (serial %s, ", 439
440 dev->vendor_name, dev->product_name, dev->serial); 440 for (c = card->shortname, len = 0;
441 441 *c && len < sizeof(card->id); c++)
442 if (len < sizeof(card->longname) - 2) 442 if (*c != ' ')
443 len += usb_make_path(usb_dev, card->longname + len, 443 id[len++] = *c;
444 sizeof(card->longname) - len); 444
445 445 snd_card_set_id(card, id);
446 card->longname[len++] = ')'; 446 }
447 card->longname[len] = '\0'; 447
448 usb_make_path(usb_dev, usbpath, sizeof(usbpath));
449 snprintf(card->longname, sizeof(card->longname),
450 "%s %s (%s)",
451 dev->vendor_name, dev->product_name, usbpath);
452
448 setup_card(dev); 453 setup_card(dev);
449 return 0; 454 return 0;
450} 455}
451 456
452static int __devinit snd_probe(struct usb_interface *intf, 457static int __devinit snd_probe(struct usb_interface *intf,
453 const struct usb_device_id *id) 458 const struct usb_device_id *id)
454{ 459{
455 int ret; 460 int ret;
456 struct snd_card *card; 461 struct snd_card *card;
457 struct usb_device *device = interface_to_usbdev(intf); 462 struct usb_device *device = interface_to_usbdev(intf);
458 463
459 ret = create_card(device, &card); 464 ret = create_card(device, &card);
460 465
461 if (ret < 0) 466 if (ret < 0)
462 return ret; 467 return ret;
463 468
464 usb_set_intfdata(intf, card); 469 usb_set_intfdata(intf, card);
465 ret = init_card(caiaqdev(card)); 470 ret = init_card(caiaqdev(card));
466 if (ret < 0) { 471 if (ret < 0) {
@@ -468,7 +473,7 @@ static int __devinit snd_probe(struct usb_interface *intf,
468 snd_card_free(card); 473 snd_card_free(card);
469 return ret; 474 return ret;
470 } 475 }
471 476
472 return 0; 477 return 0;
473} 478}
474 479
@@ -489,10 +494,10 @@ static void snd_disconnect(struct usb_interface *intf)
489 snd_usb_caiaq_input_free(dev); 494 snd_usb_caiaq_input_free(dev);
490#endif 495#endif
491 snd_usb_caiaq_audio_free(dev); 496 snd_usb_caiaq_audio_free(dev);
492 497
493 usb_kill_urb(&dev->ep1_in_urb); 498 usb_kill_urb(&dev->ep1_in_urb);
494 usb_kill_urb(&dev->midi_out_urb); 499 usb_kill_urb(&dev->midi_out_urb);
495 500
496 snd_card_free(card); 501 snd_card_free(card);
497 usb_reset_device(interface_to_usbdev(intf)); 502 usb_reset_device(interface_to_usbdev(intf));
498} 503}
diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
index 4cce1ad7493d..ece73514854e 100644
--- a/sound/usb/caiaq/device.h
+++ b/sound/usb/caiaq/device.h
@@ -81,7 +81,6 @@ struct snd_usb_caiaqdev {
81 81
82 char vendor_name[CAIAQ_USB_STR_LEN]; 82 char vendor_name[CAIAQ_USB_STR_LEN];
83 char product_name[CAIAQ_USB_STR_LEN]; 83 char product_name[CAIAQ_USB_STR_LEN];
84 char serial[CAIAQ_USB_STR_LEN];
85 84
86 int n_streams, n_audio_in, n_audio_out; 85 int n_streams, n_audio_in, n_audio_out;
87 int streaming, first_packet, output_running; 86 int streaming, first_packet, output_running;
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
index 8fa8cd88d763..538e8c00d31a 100644
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -40,7 +40,7 @@ static void snd_usb_caiaq_midi_input_trigger(struct snd_rawmidi_substream *subst
40 40
41 if (!dev) 41 if (!dev)
42 return; 42 return;
43 43
44 dev->midi_receive_substream = up ? substream : NULL; 44 dev->midi_receive_substream = up ? substream : NULL;
45} 45}
46 46
@@ -64,18 +64,18 @@ static void snd_usb_caiaq_midi_send(struct snd_usb_caiaqdev *dev,
64 struct snd_rawmidi_substream *substream) 64 struct snd_rawmidi_substream *substream)
65{ 65{
66 int len, ret; 66 int len, ret;
67 67
68 dev->midi_out_buf[0] = EP1_CMD_MIDI_WRITE; 68 dev->midi_out_buf[0] = EP1_CMD_MIDI_WRITE;
69 dev->midi_out_buf[1] = 0; /* port */ 69 dev->midi_out_buf[1] = 0; /* port */
70 len = snd_rawmidi_transmit(substream, dev->midi_out_buf + 3, 70 len = snd_rawmidi_transmit(substream, dev->midi_out_buf + 3,
71 EP1_BUFSIZE - 3); 71 EP1_BUFSIZE - 3);
72 72
73 if (len <= 0) 73 if (len <= 0)
74 return; 74 return;
75 75
76 dev->midi_out_buf[2] = len; 76 dev->midi_out_buf[2] = len;
77 dev->midi_out_urb.transfer_buffer_length = len+3; 77 dev->midi_out_urb.transfer_buffer_length = len+3;
78 78
79 ret = usb_submit_urb(&dev->midi_out_urb, GFP_ATOMIC); 79 ret = usb_submit_urb(&dev->midi_out_urb, GFP_ATOMIC);
80 if (ret < 0) 80 if (ret < 0)
81 log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed," 81 log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed,"
@@ -88,7 +88,7 @@ static void snd_usb_caiaq_midi_send(struct snd_usb_caiaqdev *dev,
88static void snd_usb_caiaq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) 88static void snd_usb_caiaq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
89{ 89{
90 struct snd_usb_caiaqdev *dev = substream->rmidi->private_data; 90 struct snd_usb_caiaqdev *dev = substream->rmidi->private_data;
91 91
92 if (up) { 92 if (up) {
93 dev->midi_out_substream = substream; 93 dev->midi_out_substream = substream;
94 if (!dev->midi_out_active) 94 if (!dev->midi_out_active)
@@ -113,12 +113,12 @@ static struct snd_rawmidi_ops snd_usb_caiaq_midi_input =
113 .trigger = snd_usb_caiaq_midi_input_trigger, 113 .trigger = snd_usb_caiaq_midi_input_trigger,
114}; 114};
115 115
116void snd_usb_caiaq_midi_handle_input(struct snd_usb_caiaqdev *dev, 116void snd_usb_caiaq_midi_handle_input(struct snd_usb_caiaqdev *dev,
117 int port, const char *buf, int len) 117 int port, const char *buf, int len)
118{ 118{
119 if (!dev->midi_receive_substream) 119 if (!dev->midi_receive_substream)
120 return; 120 return;
121 121
122 snd_rawmidi_receive(dev->midi_receive_substream, buf, len); 122 snd_rawmidi_receive(dev->midi_receive_substream, buf, len);
123} 123}
124 124
@@ -142,16 +142,16 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
142 142
143 if (device->spec.num_midi_out > 0) { 143 if (device->spec.num_midi_out > 0) {
144 rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; 144 rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT;
145 snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, 145 snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT,
146 &snd_usb_caiaq_midi_output); 146 &snd_usb_caiaq_midi_output);
147 } 147 }
148 148
149 if (device->spec.num_midi_in > 0) { 149 if (device->spec.num_midi_in > 0) {
150 rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; 150 rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT;
151 snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, 151 snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT,
152 &snd_usb_caiaq_midi_input); 152 &snd_usb_caiaq_midi_input);
153 } 153 }
154 154
155 device->rmidi = rmidi; 155 device->rmidi = rmidi;
156 156
157 return 0; 157 return 0;
@@ -160,7 +160,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
160void snd_usb_caiaq_midi_output_done(struct urb* urb) 160void snd_usb_caiaq_midi_output_done(struct urb* urb)
161{ 161{
162 struct snd_usb_caiaqdev *dev = urb->context; 162 struct snd_usb_caiaqdev *dev = urb->context;
163 163
164 dev->midi_out_active = 0; 164 dev->midi_out_active = 0;
165 if (urb->status != 0) 165 if (urb->status != 0)
166 return; 166 return;
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index a6b88482637b..c7b902358b7b 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -627,6 +627,7 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
627 subs->hwptr_done += offs; 627 subs->hwptr_done += offs;
628 if (subs->hwptr_done >= runtime->buffer_size) 628 if (subs->hwptr_done >= runtime->buffer_size)
629 subs->hwptr_done -= runtime->buffer_size; 629 subs->hwptr_done -= runtime->buffer_size;
630 runtime->delay += offs;
630 spin_unlock_irqrestore(&subs->lock, flags); 631 spin_unlock_irqrestore(&subs->lock, flags);
631 urb->transfer_buffer_length = offs * stride; 632 urb->transfer_buffer_length = offs * stride;
632 if (period_elapsed) 633 if (period_elapsed)
@@ -636,12 +637,22 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
636 637
637/* 638/*
638 * process after playback data complete 639 * process after playback data complete
639 * - nothing to do 640 * - decrease the delay count again
640 */ 641 */
641static int retire_playback_urb(struct snd_usb_substream *subs, 642static int retire_playback_urb(struct snd_usb_substream *subs,
642 struct snd_pcm_runtime *runtime, 643 struct snd_pcm_runtime *runtime,
643 struct urb *urb) 644 struct urb *urb)
644{ 645{
646 unsigned long flags;
647 int stride = runtime->frame_bits >> 3;
648 int processed = urb->transfer_buffer_length / stride;
649
650 spin_lock_irqsave(&subs->lock, flags);
651 if (processed > runtime->delay)
652 runtime->delay = 0;
653 else
654 runtime->delay -= processed;
655 spin_unlock_irqrestore(&subs->lock, flags);
645 return 0; 656 return 0;
646} 657}
647 658
@@ -1520,6 +1531,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
1520 subs->hwptr_done = 0; 1531 subs->hwptr_done = 0;
1521 subs->transfer_done = 0; 1532 subs->transfer_done = 0;
1522 subs->phase = 0; 1533 subs->phase = 0;
1534 runtime->delay = 0;
1523 1535
1524 /* clear urbs (to be sure) */ 1536 /* clear urbs (to be sure) */
1525 deactivate_urbs(subs, 0, 1); 1537 deactivate_urbs(subs, 0, 1);
@@ -3279,6 +3291,25 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
3279 return snd_usb_cm106_write_int_reg(dev, 2, 0x8004); 3291 return snd_usb_cm106_write_int_reg(dev, 2, 0x8004);
3280} 3292}
3281 3293
3294/*
3295 * C-Media CM6206 is based on CM106 with two additional
3296 * registers that are not documented in the data sheet.
3297 * Values here are chosen based on sniffing USB traffic
3298 * under Windows.
3299 */
3300static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
3301{
3302 int err, reg;
3303 int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
3304
3305 for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
3306 err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
3307 if (err < 0)
3308 return err;
3309 }
3310
3311 return err;
3312}
3282 3313
3283/* 3314/*
3284 * Setup quirks 3315 * Setup quirks
@@ -3565,6 +3596,12 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
3565 goto __err_val; 3596 goto __err_val;
3566 } 3597 }
3567 3598
3599 /* C-Media CM6206 / CM106-Like Sound Device */
3600 if (id == USB_ID(0x0d8c, 0x0102)) {
3601 if (snd_usb_cm6206_boot_quirk(dev) < 0)
3602 goto __err_val;
3603 }
3604
3568 /* 3605 /*
3569 * found a config. now register to ALSA 3606 * found a config. now register to ALSA
3570 */ 3607 */
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index 5d955aaad85f..f0f7624f9178 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -1470,6 +1470,41 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1470 } 1470 }
1471}, 1471},
1472{ 1472{
1473 /* Edirol M-16DX */
1474 /* FIXME: This quirk gives a good-working capture stream but the
1475 * playback seems problematic because of lacking of sync
1476 * with capture stream. It needs to sync with the capture
1477 * clock. As now, you'll get frequent sound distortions
1478 * via the playback.
1479 */
1480 USB_DEVICE(0x0582, 0x00c4),
1481 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1482 .ifnum = QUIRK_ANY_INTERFACE,
1483 .type = QUIRK_COMPOSITE,
1484 .data = (const struct snd_usb_audio_quirk[]) {
1485 {
1486 .ifnum = 0,
1487 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1488 },
1489 {
1490 .ifnum = 1,
1491 .type = QUIRK_AUDIO_STANDARD_INTERFACE
1492 },
1493 {
1494 .ifnum = 2,
1495 .type = QUIRK_MIDI_FIXED_ENDPOINT,
1496 .data = & (const struct snd_usb_midi_endpoint_info) {
1497 .out_cables = 0x0001,
1498 .in_cables = 0x0001
1499 }
1500 },
1501 {
1502 .ifnum = -1
1503 }
1504 }
1505 }
1506},
1507{
1473 /* BOSS GT-10 */ 1508 /* BOSS GT-10 */
1474 USB_DEVICE(0x0582, 0x00da), 1509 USB_DEVICE(0x0582, 0x00da),
1475 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 1510 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
@@ -1951,6 +1986,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1951 } 1986 }
1952}, 1987},
1953{ 1988{
1989 USB_DEVICE(0x0ccd, 0x0028),
1990 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1991 .vendor_name = "TerraTec",
1992 .product_name = "Aureon 5.1 MkII",
1993 .ifnum = QUIRK_NO_INTERFACE
1994 }
1995},
1996{
1954 USB_DEVICE(0x0ccd, 0x0035), 1997 USB_DEVICE(0x0ccd, 0x0035),
1955 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 1998 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1956 .vendor_name = "Miditech", 1999 .vendor_name = "Miditech",
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 29259e74dcfa..0f5771f615da 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -568,8 +568,11 @@ int cmd_record(int argc, const char **argv, const char *prefix)
568 if (!argc && target_pid == -1 && !system_wide) 568 if (!argc && target_pid == -1 && !system_wide)
569 usage_with_options(record_usage, options); 569 usage_with_options(record_usage, options);
570 570
571 if (!nr_counters) 571 if (!nr_counters) {
572 nr_counters = 1; 572 nr_counters = 1;
573 attrs[0].type = PERF_TYPE_HARDWARE;
574 attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
575 }
573 576
574 for (counter = 0; counter < nr_counters; counter++) { 577 for (counter = 0; counter < nr_counters; counter++) {
575 if (attrs[counter].sample_period) 578 if (attrs[counter].sample_period)
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 860e116d979c..f71e0d245cba 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -440,3 +440,18 @@ by this process or by another, and doesn't affect any counters that
440this process has created on other processes. It only enables or 440this process has created on other processes. It only enables or
441disables the group leaders, not any other members in the groups. 441disables the group leaders, not any other members in the groups.
442 442
443
444Arch requirements
445-----------------
446
447If your architecture does not have hardware performance metrics, you can
448still use the generic software counters based on hrtimers for sampling.
449
450So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you
451will need at least this:
452 - asm/perf_counter.h - a basic stub will suffice at first
453 - support for atomic64 types (and associated helper functions)
454 - set_perf_counter_pending() implemented
455
456If your architecture does have hardware capabilities, you can override the
457weak stub hw_perf_counter_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index af0a5046d743..87a1aca4a424 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -53,11 +53,12 @@ static inline unsigned long long rdclock(void)
53 _min1 < _min2 ? _min1 : _min2; }) 53 _min1 < _min2 ? _min1 : _min2; })
54 54
55static inline int 55static inline int
56sys_perf_counter_open(struct perf_counter_attr *attr_uptr, 56sys_perf_counter_open(struct perf_counter_attr *attr,
57 pid_t pid, int cpu, int group_fd, 57 pid_t pid, int cpu, int group_fd,
58 unsigned long flags) 58 unsigned long flags)
59{ 59{
60 return syscall(__NR_perf_counter_open, attr_uptr, pid, cpu, 60 attr->size = sizeof(*attr);
61 return syscall(__NR_perf_counter_open, attr, pid, cpu,
61 group_fd, flags); 62 group_fd, flags);
62} 63}
63 64
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 9d5f1ca50e6f..5a72586e1df0 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -75,7 +75,7 @@ static char *sw_event_names[] = {
75#define MAX_ALIASES 8 75#define MAX_ALIASES 8
76 76
77static char *hw_cache [][MAX_ALIASES] = { 77static char *hw_cache [][MAX_ALIASES] = {
78 { "L1-data" , "l1-d", "l1d", "l1" }, 78 { "L1-data" , "l1-d", "l1d" },
79 { "L1-instruction" , "l1-i", "l1i" }, 79 { "L1-instruction" , "l1-i", "l1i" },
80 { "L2" , "l2" }, 80 { "L2" , "l2" },
81 { "Data-TLB" , "dtlb", "d-tlb" }, 81 { "Data-TLB" , "dtlb", "d-tlb" },